diff --git a/.github/.markdownlint.yaml b/.github/.markdownlint.yaml deleted file mode 100644 index 6a93d89c46ad..000000000000 --- a/.github/.markdownlint.yaml +++ /dev/null @@ -1,210 +0,0 @@ -# Default state for all rules -default: true - -# Path to configuration file to extend -extends: null - -# MD001/heading-increment/header-increment - Heading levels should only increment by one level at a time -MD001: true - -# MD002/first-heading-h1/first-header-h1 - First heading should be a top-level heading -MD002: - # Heading level - level: 1 - -# MD003/heading-style/header-style - Heading style -MD003: - # Heading style - style: "consistent" - -# MD004/ul-style - Unordered list style -MD004: - # List style - style: "consistent" - -# MD005/list-indent - Inconsistent indentation for list items at the same level -MD005: false - -# MD006/ul-start-left - Consider starting bulleted lists at the beginning of the line -MD006: false - -# MD007/ul-indent - Unordered list indentation -MD007: false - -# MD009/no-trailing-spaces - Trailing spaces -MD009: - # Spaces for line break - br_spaces: 2 - # Allow spaces for empty lines in list items - list_item_empty_lines: false - # Include unnecessary breaks - strict: false - -# MD010/no-hard-tabs - Hard tabs -MD010: false - -# MD011/no-reversed-links - Reversed link syntax -MD011: true - -# MD012/no-multiple-blanks - Multiple consecutive blank lines -MD012: - # Consecutive blank lines - maximum: 2 - -# MD013/line-length - Line length -MD013: - # Number of characters - line_length: 120 - # Number of characters for headings - heading_line_length: 120 - # Number of characters for code blocks - code_block_line_length: 150 - # Include code blocks - code_blocks: true - # Include tables - tables: true - # Include headings - headings: true - # Include headings - headers: true - # Strict length checking - strict: false - # Stern length checking - stern: false - -# MD014/commands-show-output - Dollar signs used before commands without showing output -MD014: true - -# MD018/no-missing-space-atx - No space after hash on atx style heading -MD018: true - -# MD019/no-multiple-space-atx - Multiple spaces after hash on atx style heading -MD019: true - -# MD020/no-missing-space-closed-atx - No space inside hashes on closed atx style heading -MD020: true - -# MD021/no-multiple-space-closed-atx - Multiple spaces inside hashes on closed atx style heading -MD021: true - -# MD022/blanks-around-headings/blanks-around-headers - Headings should be surrounded by blank lines -MD022: false - -# MD023/heading-start-left/header-start-left - Headings must start at the beginning of the line -MD023: true - -# MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content -MD024: false - -# MD025/single-title/single-h1 - Multiple top-level headings in the same document -MD025: false - -# MD026/no-trailing-punctuation - Trailing punctuation in heading -MD026: - # Punctuation characters - punctuation: ".,;:!。,;:!" 
- -# MD027/no-multiple-space-blockquote - Multiple spaces after blockquote symbol -MD027: true - -# MD028/no-blanks-blockquote - Blank line inside blockquote -MD028: true - -# MD029/ol-prefix - Ordered list item prefix -MD029: - # List style - style: "one_or_ordered" - -# MD030/list-marker-space - Spaces after list markers -MD030: - # Spaces for single-line unordered list items - ul_single: 1 - # Spaces for single-line ordered list items - ol_single: 1 - # Spaces for multi-line unordered list items - ul_multi: 1 - # Spaces for multi-line ordered list items - ol_multi: 1 - -# MD031/blanks-around-fences - Fenced code blocks should be surrounded by blank lines -MD031: false - -# MD032/blanks-around-lists - Lists should be surrounded by blank lines -MD032: false - -# MD033/no-inline-html - Inline HTML -MD033: false - -# MD034/no-bare-urls - Bare URL used -MD034: false - -# MD035/hr-style - Horizontal rule style -MD035: - # Horizontal rule style - style: "consistent" - -# MD036/no-emphasis-as-heading/no-emphasis-as-header - Emphasis used instead of a heading -MD036: false - -# MD037/no-space-in-emphasis - Spaces inside emphasis markers -MD037: true - -# MD038/no-space-in-code - Spaces inside code span elements -MD038: true - -# MD039/no-space-in-links - Spaces inside link text -MD039: true - -# MD040/fenced-code-language - Fenced code blocks should have a language specified -MD040: false - -# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading -MD041: false - -# MD042/no-empty-links - No empty links -MD042: true - -# MD043/required-headings/required-headers - Required heading structure -MD043: false - -# MD044/proper-names - Proper names should have the correct capitalization -MD044: - # List of proper names - names: ["Polkadot", "Substrate", "Cumulus", "Parity"] - # Include code blocks - code_blocks: false - # Include HTML elements - html_elements: false - -# MD045/no-alt-text - Images should have alternate text (alt text) -MD045: false - -# MD046/code-block-style - Code block style -MD046: - # Block style - style: "consistent" - -# MD047/single-trailing-newline - Files should end with a single newline character -MD047: true - -# MD048/code-fence-style - Code fence style -MD048: - # Code fence style - style: "consistent" - -# MD049/emphasis-style - Emphasis style should be consistent -MD049: false - -# MD050/strong-style - Strong style should be consistent -MD050: - # Strong style - style: "consistent" - -# MD051/link-fragments - Link fragments should be valid -MD051: false - -# MD052/reference-links-images - Reference links and images should use a label that is defined -MD052: false - -# MD053/link-image-reference-definitions - Link and image reference definitions should be needed -MD053: false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fdaa0c8628f7..84c68219d51f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,75 +1 @@ -# Lists some code owners. -# -# A codeowner just oversees some part of the codebase. If an owned file is changed then the -# corresponding codeowner receives a review request. An approval of the codeowner might be -# required for merging a PR (depends on repository settings). -# -# For details about syntax, see: -# https://help.github.com/en/articles/about-code-owners -# But here are some important notes: -# -# - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core` -# which can be everywhere. -# - Multiple owners are supported. 
-# - Either handle (e.g., @github_user or @github/team) or email can be used. Keep in mind, -# that handles might work better because they are more recognizable on GitHub, -# and you can use them for mentioning, unlike an email. -# - The latest matching rule, if multiple, takes precedence. - -# CI -/.github/ @paritytech/ci @paritytech/release-engineering -/.gitlab-ci.yml @paritytech/ci -/.gitlab/ @paritytech/ci - -# XCM -/polkadot/xcm/ @paritytech/xcm - -# WASM executor, low-level client <-> WASM interface and other WASM-related code -/substrate/client/allocator/ @koute -/substrate/client/executor/ @koute -/substrate/primitives/panic-handler/ @koute -/substrate/primitives/runtime-interface/ @koute -/substrate/primitives/wasm-interface/ @koute -/substrate/utils/wasm-builder/ @koute - -# Systems-related bits and bobs on the client side -/substrate/client/sysinfo/ @koute -/substrate/client/tracing/ @koute - -# Documentation audit -/substrate/primitives/runtime @paritytech/docs-audit -/substrate/primitives/arithmetic @paritytech/docs-audit -# /primitives/core (to be added later) -# /primitives/io (to be added later) - -# FRAME -/substrate/frame/ @paritytech/frame-coders @paritytech/docs-audit -/substrate/frame/nfts/ @jsidorenko @paritytech/docs-audit -/substrate/frame/state-trie-migration/ @paritytech/frame-coders @cheme -/substrate/frame/uniques/ @jsidorenko @paritytech/docs-audit - -# GRANDPA, BABE, consensus stuff -/substrate/client/consensus/babe/ @andresilva -/substrate/client/consensus/grandpa/ @andresilva -/substrate/client/consensus/pow/ @sorpaas -/substrate/client/consensus/slots/ @andresilva -/substrate/frame/babe/ @andresilva -/substrate/frame/grandpa/ @andresilva -/substrate/primitives/consensus/pow/ @sorpaas - -# BEEFY, MMR -/substrate/frame/beefy/ @acatangiu -/substrate/frame/beefy-mmr/ @acatangiu -/substrate/frame/merkle-mountain-range/ @acatangiu -/substrate/primitives/merkle-mountain-range/ @acatangiu - -# Contracts -/substrate/frame/contracts/ @athei @paritytech/docs-audit - -# NPoS and election -/substrate/frame/election-provider-multi-phase/ @paritytech/staking-core @paritytech/docs-audit -/substrate/frame/election-provider-support/ @paritytech/staking-core @paritytech/docs-audit -/substrate/frame/elections-phragmen/ @paritytech/staking-core @paritytech/docs-audit -/substrate/frame/nomination-pools/ @paritytech/staking-core @paritytech/docs-audit -/substrate/frame/staking/ @paritytech/staking-core @paritytech/docs-audit -/substrate/primitives/npos-elections/ @paritytech/staking-core @paritytech/docs-audit +/bridges/snowbridge/*.rs @Snowfork/snowbridge-devs diff --git a/.github/ISSUE_TEMPLATE/blank.md b/.github/ISSUE_TEMPLATE/blank.md deleted file mode 100644 index 2a9137e72802..000000000000 --- a/.github/ISSUE_TEMPLATE/blank.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -name: New blank issue -about: New blank issue --- diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml deleted file mode 100644 index f828a5d9d893..000000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ /dev/null @@ -1,35 +0,0 @@ -name: Bug Report -description: Let us know about an issue you experienced with this software -labels: [ I2-bug, I10-unconfirmed ] - -body: - - type: checkboxes - attributes: - label: Is there an existing issue? - description: Please search to see if an issue already exists; if it does, leave a comment that you also experienced it, or add your specifics to the existing issue.
- options: - - label: I have searched the existing issues - required: true - - type: checkboxes - attributes: - label: Experiencing problems? Have you tried our Stack Exchange first? - description: Please search to see if a post already exists, and ask if not. Please do not file support issues here. - options: - - label: This is not a support question. - required: true - - type: textarea - id: bug - attributes: - label: Description of bug - description: What seems to be the problem? - # placeholder: Describe the problem. - validations: - required: true - - type: textarea - id: steps - attributes: - label: Steps to reproduce - description: Provide the steps that led to the discovery of the issue. - # placeholder: Describe what you were doing so we can reproduce the problem. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index e422e317411f..000000000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,7 +0,0 @@ -blank_issues_enabled: true -contact_links: - - name: Support & Troubleshooting with the Substrate Stack Exchange Community - url: https://substrate.stackexchange.com - about: | - For general problems with Substrate or related technologies, please search here first - for solutions, by keyword and tags. If you discover no solution, please then ask your question in our community! We highly encourage everyone to also share their understanding by answering questions for others. diff --git a/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml deleted file mode 100644 index 828e8b461ccc..000000000000 --- a/.github/ISSUE_TEMPLATE/feature.yaml +++ /dev/null @@ -1,55 +0,0 @@ -name: Feature Request -description: Submit your requests and suggestions for improvements! -labels: [ I5-enhancement ] -body: - - type: checkboxes - id: existing - attributes: - label: Is there an existing issue? - description: Please search to see if an issue already exists; if it does, leave a comment that you also experienced it, or add your specifics to the existing issue. - options: - - label: I have searched the existing issues - required: true - - type: checkboxes - id: stackexchange - attributes: - label: Experiencing problems? Have you tried our Stack Exchange first? - description: Please search to see if a post already exists, and ask if not. Please do not file support issues here. - options: - - label: This is not a support question. - required: true - - type: textarea - id: motivation - attributes: - label: Motivation - description: Please give context as to what led you to file this issue. - # placeholder: Describe ... - validations: - required: false - - type: textarea - id: request - attributes: - label: Request - description: Please describe what is needed. - # placeholder: Describe what you would like to see added or changed. - validations: - required: true - - type: textarea - id: solution - attributes: - label: Solution - description: If possible, please describe what a solution could be. - # placeholder: Describe what you would like to see added or changed. - validations: - required: false - - type: dropdown - id: help - attributes: - label: Are you willing to help with this request? - multiple: true - options: - - - Yes! - - - No.
- - Maybe (please elaborate above) - validations: - required: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 3277a6e4607a..000000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,27 +0,0 @@ -version: 2 -updates: - # Update github actions: - - package-ecosystem: github-actions - directory: '/' - labels: ["A1-insubstantial", "R0-silent"] - schedule: - interval: daily - # Update Rust dependencies: - - package-ecosystem: "cargo" - directory: "/" - labels: ["A1-insubstantial", "R0-silent"] - schedule: - interval: "daily" - groups: - # We assume these crates to be semver abiding and can therefore group them together. - known_good_semver: - patterns: - - "syn" - - "quote" - - "log" - - "paste" - - "*serde*" - - "clap" - update-types: - - "minor" - - "patch" diff --git a/.github/review-bot.yml b/.github/review-bot.yml deleted file mode 100644 index ed719cefec8b..000000000000 --- a/.github/review-bot.yml +++ /dev/null @@ -1,84 +0,0 @@ -rules: - - name: CI files - countAuthor: true - condition: - include: - - ^\.gitlab-ci\.yml - - ^docker/.* - - ^\.github/.* - - ^\.gitlab/.* - - ^\.config/nextest.toml - - ^\.cargo/.* - exclude: - - ^\.gitlab/pipeline/zombienet.* - type: "or" - reviewers: - - minApprovals: 2 - teams: - - ci - - minApprovals: 2 - teams: - - core-devs - - - name: Core developers - countAuthor: true - condition: - include: - - .* - # excluding files from 'Runtime files' and 'CI files' rules - exclude: - - ^cumulus/parachains/common/src/[^/]+\.rs$ - - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) - - ^\.gitlab-ci\.yml - - ^docker/.* - - ^\.github/.* - - ^\.gitlab/.* - - ^\.config/nextest.toml - - ^\.cargo/.* - minApprovals: 2 - type: basic - teams: - - core-devs - - # if there are any changes in the bridges subtree (in case of backport changes back to bridges repo) - - name: Bridges subtree files - type: basic - condition: - include: - - ^bridges/.* - minApprovals: 1 - teams: - - bridges-core - - # substrate - - name: FRAME coders substrate - condition: - include: - - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) - type: "and" - reviewers: - - minApprovals: 2 - teams: - - core-devs - - minApprovals: 1 - teams: - - frame-coders - - # Protection of THIS file - - name: Review Bot - countAuthor: true - condition: - include: - - review-bot\.yml - type: "and" - reviewers: - - minApprovals: 1 - teams: - - opstooling - - minApprovals: 1 - teams: - - locks-review - -preventReviewRequests: - teams: - - core-devs diff --git a/.github/runtime_specs/rococo.json b/.github/runtime_specs/rococo.json deleted file mode 100644 index 6568b06400c8..000000000000 --- a/.github/runtime_specs/rococo.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "pallets": { - "1": { - "constants": { - "EpochDuration": { - "value": [ 88, 2, 0, 0, 0, 0, 0, 0 ]} - } - }, - - "2": { - "constants": { - "MinimumPeriod": { - "value": [ 184, 11, 0, 0, 0, 0, 0, 0 ]} - } - } - } - } diff --git a/.github/runtime_specs/westend.json b/.github/runtime_specs/westend.json deleted file mode 100644 index 6568b06400c8..000000000000 --- a/.github/runtime_specs/westend.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "pallets": { - "1": { - "constants": { - "EpochDuration": { - "value": [ 88, 2, 0, 0, 0, 0, 0, 0 ]} - } - }, - - "2": { - "constants": { - "MinimumPeriod": { - "value": [ 184, 11, 
0, 0, 0, 0, 0, 0 ]} - } - } - } - } diff --git a/.github/scripts/check-runtime.py b/.github/scripts/check-runtime.py deleted file mode 100755 index 9f3d047e01f8..000000000000 --- a/.github/scripts/check-runtime.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python3 - -import json -import sys -import logging -import os - - -def check_constant(spec_pallet_id, spec_pallet_value, meta_constant): - """ - Check a single constant - - :param spec_pallet_id: - :param spec_pallet_value: - :param meta_constant: - :return: - """ - if meta_constant['name'] == list(spec_pallet_value.keys())[0]: - constant = meta_constant['name'] - res = list(spec_pallet_value.values())[0]["value"] == meta_constant["value"] - - logging.debug(f" Checking pallet:{spec_pallet_id}/constants/{constant}") - logging.debug(f" spec_pallet_value: {spec_pallet_value}") - logging.debug(f" meta_constant: {meta_constant}") - logging.info(f"pallet:{spec_pallet_id}/constants/{constant} -> {res}") - return res - else: - # logging.warning(f" Skipping pallet:{spec_pallet_id}/constants/{meta_constant['name']}") - pass - - -def check_pallet(metadata, spec_pallet): - """ - Check one pallet - - :param metadata: - :param spec_pallet: - :return: - """ - - spec_pallet_id, spec_pallet_value = spec_pallet - logging.debug(f"Pallet: {spec_pallet_id}") - - metadata_pallets = metadata["pallets"] - metadata_pallet = metadata_pallets[spec_pallet_id] - - res = map(lambda meta_constant_value: check_constant( - spec_pallet_id, spec_pallet_value["constants"], meta_constant_value), - metadata_pallet["constants"].values()) - res = list(filter(lambda item: item is not None, res)) - return all(res) - - -def check_pallets(metadata, specs): - """ - Check all pallets - - :param metadata: - :param specs: - :return: - """ - - res = list(map(lambda spec_pallet: check_pallet(metadata, spec_pallet), - specs['pallets'].items())) - res = list(filter(lambda item: item is not None, res)) - return all(res) - - -def check_metadata(metadata, specs): - """ - Check metadata (json) against a list of expectations - - :param metadata: Metadata in JSON format - :param specs: Expectations - :return: Bool - """ - - res = check_pallets(metadata, specs) - return res - - -def help(): - """ Show some simple help """ - - print(f"You must pass 2 args, you passed {len(sys.argv) - 1}") - print("Sample call:") - print("check-runtime.py <metadata_file> <specs_file>") - - -def load_json(file): - """ Load json from a file """ - - f = open(file) - return json.load(f) - - -def main(): - LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper() - logging.basicConfig(level=LOGLEVEL) - - if len(sys.argv) != 3: - help() - exit(1) - - metadata_file = sys.argv[1] - specs_file = sys.argv[2] - print(f"Checking metadata from: {metadata_file} with specs from: {specs_file}") - - metadata = load_json(metadata_file) - specs = load_json(specs_file) - - res = check_metadata(metadata, specs) - - if res: - logging.info("OK") - exit(0) - else: - print("") - logging.info("Some errors were found, run again with LOGLEVEL=debug") - exit(1) - -if __name__ == "__main__": - main()
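For context on the script removed above: check-runtime.py compared a subwasm metadata dump against one of the reference specs under .github/runtime_specs, and a non-zero exit code meant at least one constant did not match. A minimal sketch of a manual invocation, mirroring the check-runtimes workflow later in this diff (file names are illustrative):

    subwasm show --json runtimes/westend_runtime.wasm > westend.json
    LOGLEVEL=debug ./.github/scripts/check-runtime.py westend.json .github/runtime_specs/westend.json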
diff --git a/.github/scripts/check-workspace.py b/.github/scripts/check-workspace.py deleted file mode 100644 index d200122fee9f..000000000000 --- a/.github/scripts/check-workspace.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env python3 - -# Ensures that: -# - all crates are added to the root workspace -# - local dependencies are resolved via `path` -# -# It does not check that the local paths resolve to the correct crate. This is already done by cargo. -# -# Must be called with a folder containing a `Cargo.toml` workspace file. - -import os -import sys -import toml -import argparse - -def parse_args(): - parser = argparse.ArgumentParser(description='Check Rust workspace integrity.') - - parser.add_argument('workspace_dir', help='The directory to check', metavar='workspace_dir', type=str, nargs=1) - parser.add_argument('--exclude', help='Exclude crate paths from the check', metavar='exclude', type=str, nargs='*', default=[]) - - args = parser.parse_args() - return (args.workspace_dir[0], args.exclude) - -def main(root, exclude): - workspace_crates = get_members(root, exclude) - all_crates = get_crates(root, exclude) - print(f'📦 Found {len(all_crates)} crates in total') - - check_duplicates(workspace_crates) - check_missing(workspace_crates, all_crates) - check_links(all_crates) - -# Extract all members from a workspace. -# Return: list of all workspace paths -def get_members(workspace_dir, exclude): - print(f'🔎 Indexing workspace {os.path.abspath(workspace_dir)}') - - root_manifest_path = os.path.join(workspace_dir, "Cargo.toml") - if not os.path.exists(root_manifest_path): - print(f'❌ No root manifest found at {root_manifest_path}') - sys.exit(1) - - root_manifest = toml.load(root_manifest_path) - if not 'workspace' in root_manifest: - print(f'❌ No workspace found in root {root_manifest_path}') - sys.exit(1) - - if not 'members' in root_manifest['workspace']: - return [] - - members = [] - for member in root_manifest['workspace']['members']: - if member in exclude: - print(f'❌ Excluded member should not appear in the workspace {member}') - sys.exit(1) - members.append(member) - - return members - -# List all crates of the workspace. -# Return: Map name -> (path, manifest) -def get_crates(workspace_dir, exclude_crates) -> dict: - crates = {} - - for root, dirs, files in os.walk(workspace_dir): - if "target" in root: - continue - for file in files: - if file != "Cargo.toml": - continue - - path = os.path.join(root, file) - with open(path, "r") as f: - content = f.read() - manifest = toml.loads(content) - - if 'workspace' in manifest: - if root != workspace_dir: - print("⏩ Excluded recursive workspace at %s" % path) - continue - - # Cut off the root path and the trailing /Cargo.toml. - path = path[len(workspace_dir)+1:-11] - name = manifest['package']['name'] - if path in exclude_crates: - print("⏩ Excluded crate %s at %s" % (name, path)) - continue - crates[name] = (path, manifest) - - return crates - -# Check that there are no duplicate entries in the workspace. -def check_duplicates(workspace_crates): - print(f'🔎 Checking for duplicate crates') - found = {} - for path in workspace_crates: - if path in found: - print(f'❌ crate is listed twice in the workspace {path}') - sys.exit(1) - found[path] = True - -# Check that all crates are in the workspace. -def check_missing(workspace_crates, all_crates): - print(f'🔎 Checking for missing crates') - if len(workspace_crates) == len(all_crates): - print(f'✅ All {len(all_crates)} crates are in the workspace') - return - - missing = [] - # Find out which ones are missing. - for name, (path, manifest) in all_crates.items(): - if not path in workspace_crates: - missing.append([name, path, manifest]) - missing.sort() - - for name, path, _manifest in missing: - print("❌ %s in %s" % (name, path)) - print(f'😱 {len(all_crates) - len(workspace_crates)} crates are missing from the workspace') - sys.exit(1) - -# Check that all local dependencies are good.
-def check_links(all_crates): - print(f'🔎 Checking for broken dependency links') - links = [] - broken = [] - - for name, (path, manifest) in all_crates.items(): - def check_deps(deps): - for dep in deps: - # Could be renamed: - dep_name = dep - if 'package' in deps[dep]: - dep_name = deps[dep]['package'] - if dep_name in all_crates: - links.append((name, dep_name)) - - if not 'path' in deps[dep]: - broken.append((name, dep_name, "crate must be linked via `path`")) - return - - def check_crate(deps): - to_checks = ['dependencies', 'dev-dependencies', 'build-dependencies'] - - for to_check in to_checks: - if to_check in deps: - check_deps(deps[to_check]) - - # There could possibly be target-dependent deps: - if 'target' in manifest: - # Target-dependent deps can only have one level of nesting: - for _, target in manifest['target'].items(): - check_crate(target) - - check_crate(manifest) - - links.sort() - broken.sort() - - if len(broken) > 0: - for (l, r, reason) in broken: - print(f'❌ {l} -> {r} ({reason})') - - print("💥 %d out of %d links are broken" % (len(broken), len(links))) - sys.exit(1) - else: - print("✅ All %d internal dependency links are correct" % len(links)) - -if __name__ == "__main__": - args = parse_args() - main(args[0], args[1]) diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh deleted file mode 100755 index bd12d9c6e6ff..000000000000 --- a/.github/scripts/common/lib.sh +++ /dev/null @@ -1,346 +0,0 @@ -#!/bin/sh - -api_base="https://api.github.com/repos" - -# Function to take 2 git tags/commits and get any lines from commit messages -# that contain something that looks like a PR reference: e.g., (#1234) -sanitised_git_logs(){ - git --no-pager log --pretty=format:"%s" "$1...$2" | - # Only find messages referencing a PR - grep -E '\(#[0-9]+\)' | - # Strip any asterisks - sed 's/^* //g' -} - -# Checks whether a tag on github has been verified -# repo: 'organization/repo' -# tagver: 'v1.2.3' -# Usage: check_tag $repo $tagver -check_tag () { - repo=$1 - tagver=$2 - if [ -n "$GITHUB_RELEASE_TOKEN" ]; then - echo '[+] Fetching tag using privileged token' - tag_out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/git/refs/tags/$tagver") - else - echo '[+] Fetching tag using unprivileged token' - tag_out=$(curl -H "Authorization: token $GITHUB_PR_TOKEN" -s "$api_base/$repo/git/refs/tags/$tagver") - fi - tag_sha=$(echo "$tag_out" | jq -r .object.sha) - object_url=$(echo "$tag_out" | jq -r .object.url) - if [ "$tag_sha" = "null" ]; then - return 2 - fi - echo "[+] Tag object SHA: $tag_sha" - verified_str=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$object_url" | jq -r .verification.verified) - if [ "$verified_str" = "true" ]; then - # Verified, everything is good - return 0 - else - # Not verified. Bad juju. - return 1 - fi -} - -# Checks whether a given PR has a given label. -# repo: 'organization/repo' -# pr_id: 12345 -# label: B1-silent -# Usage: has_label $repo $pr_id $label -has_label(){ - repo="$1" - pr_id="$2" - label="$3" - - # These will exist if the function is called in Gitlab. - # If the function's called in Github, we should have GITHUB_PR_TOKEN or - # GITHUB_RELEASE_TOKEN set already.
- if [ -n "$GITHUB_RELEASE_TOKEN" ]; then - GITHUB_TOKEN="$GITHUB_RELEASE_TOKEN" - elif [ -n "$GITHUB_PR_TOKEN" ]; then - GITHUB_TOKEN="$GITHUB_PR_TOKEN" - fi - - out=$(curl -H "Authorization: token $GITHUB_TOKEN" -s "$api_base/$repo/pulls/$pr_id") - [ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ] -} - -github_label () { - echo - echo "# run github-api job for labeling it ${1}" - curl -sS -X POST \ - -F "token=${CI_JOB_TOKEN}" \ - -F "ref=master" \ - -F "variables[LABEL]=${1}" \ - -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \ - -F "variables[PROJECT]=paritytech/polkadot" \ - "${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline" -} - -# Formats a message into a JSON string for posting to Matrix -# message: 'any plaintext message' -# formatted_message: 'optional message formatted in html' -# Usage: structure_message $content $formatted_content (optional) -structure_message() { - if [ -z "$2" ]; then - body=$(jq -Rs --arg body "$1" '{"msgtype": "m.text", $body}' < /dev/null) - else - body=$(jq -Rs --arg body "$1" --arg formatted_body "$2" '{"msgtype": "m.text", $body, "format": "org.matrix.custom.html", $formatted_body}' < /dev/null) - fi - echo "$body" -} - -# Post a message to a matrix room -# body: '{body: "JSON string produced by structure_message"}' -# room_id: !fsfSRjgjBWEWffws:matrix.parity.io -# access_token: see https://matrix.org/docs/guides/client-server-api/ -# Usage: send_message $body (json formatted) $room_id $access_token -send_message() { - curl -XPOST -d "$1" "https://m.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" -} - -# Pretty-printing functions -boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; } -boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; } - -skip_if_companion_pr() { - url="https://api.github.com/repos/paritytech/polkadot/pulls/${CI_COMMIT_REF_NAME}" - echo "[+] API URL: $url" - - pr_title=$(curl -sSL -H "Authorization: token ${GITHUB_PR_TOKEN}" "$url" | jq -r .title) - echo "[+] PR title: $pr_title" - - if echo "$pr_title" | grep -qi '^companion'; then - echo "[!] PR is a companion PR. Build is already done in substrate" - exit 0 - else - echo "[+] PR is not a companion PR. Proceeding test" - fi -} - -# Fetches the tag name of the latest release from a repository -# repo: 'organisation/repo' -# Usage: latest_release 'paritytech/polkadot' -latest_release() { - curl -s "$api_base/$1/releases/latest" | jq -r '.tag_name' -} - -# Check for runtime changes between two commits. This is defined as any changes -# to /primitives/src/* and any *production* chains under /runtime -has_runtime_changes() { - from=$1 - to=$2 - - if git diff --name-only "${from}...${to}" \ - | grep -q -e '^runtime/polkadot' -e '^runtime/kusama' -e '^primitives/src/' -e '^runtime/common' - then - return 0 - else - return 1 - fi -} - -# given a bootnode and the path to a chainspec file, this function will create a new chainspec file -# with only the bootnode specified and test whether that bootnode provides peers -# The optional third argument is the index of the bootnode in the list of bootnodes, this is just used to pick an ephemeral -# port for the node to run on. 
port for the node to run on. If you're only testing one bootnode, it'll just use the first ephemeral port -# BOOTNODE: /dns/polkadot-connect-0.parity.io/tcp/443/wss/p2p/12D3KooWEPmjoRpDSUuiTjvyNDd8fejZ9eNWH5bE965nyBMDrB4o -# CHAINSPEC_FILE: /path/to/polkadot.json -check_bootnode(){ - BOOTNODE=$1 - BASE_CHAINSPEC=$2 - RUNTIME=$(basename "$BASE_CHAINSPEC" | cut -d '.' -f 1) - MIN_PEERS=1 - - # Generate a temporary chainspec file containing only the bootnode we care about - TMP_CHAINSPEC_FILE="$RUNTIME.$(echo "$BOOTNODE" | tr '/' '_').tmp.json" - jq ".bootNodes = [\"$BOOTNODE\"] " < "$BASE_CHAINSPEC" > "$TMP_CHAINSPEC_FILE" - - # Grab an unused port by binding to port 0 and then immediately closing the socket - # This is a bit of a hack, but it's the only way to do it in the shell - RPC_PORT=$(python -c "import socket; s=socket.socket(); s.bind(('', 0)); print(s.getsockname()[1]); s.close()") - - echo "[+] Checking bootnode $BOOTNODE" - polkadot --chain "$TMP_CHAINSPEC_FILE" --no-mdns --rpc-port="$RPC_PORT" --tmp > /dev/null 2>&1 & - POLKADOT_PID=$! - # Wait a few seconds for the node to start up - sleep 5 - - MAX_POLLS=10 - TIME_BETWEEN_POLLS=3 - for _ in $(seq 1 "$MAX_POLLS"); do - # Check the health endpoint of the RPC node - PEERS="$(curl -s -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"system_health","params":[],"id":1}' http://localhost:"$RPC_PORT" | jq -r '.result.peers')" - # Sometimes due to machine load or other reasons, we don't get a response from the RPC node - # If $PEERS is an empty variable, make it 0 so we can still do the comparison - if [ -z "$PEERS" ]; then - PEERS=0 - fi - if [ "$PEERS" -ge $MIN_PEERS ]; then - echo "[+] $PEERS peers found for $BOOTNODE" - echo "    Bootnode appears contactable" - kill $POLKADOT_PID - # Delete the temporary chainspec file now we're done running the node - rm "$TMP_CHAINSPEC_FILE" - return 0 - fi - sleep "$TIME_BETWEEN_POLLS" - done - kill $POLKADOT_PID - # Delete the temporary chainspec file now we're done running the node - rm "$TMP_CHAINSPEC_FILE" - echo "[!] No peers found for $BOOTNODE" - echo "    Bootnode appears unreachable" - return 1 -} - -# Assumes the ENV are set: -# - RELEASE_ID -# - GITHUB_TOKEN -# - REPO in the form paritytech/polkadot -# - BINARY -fetch_release_artifacts() { - echo "Release ID : $RELEASE_ID" - echo "Repo : $REPO" - echo "Binary : $BINARY" - OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} - echo "OUTPUT_DIR : $OUTPUT_DIR" - - echo "Fetching release info..." - curl -L -s \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/${REPO}/releases/${RELEASE_ID} > release.json - - echo "Extract asset ids..." - ids=($(jq -r '.assets[].id' < release.json )) - echo "Extract asset count..." - count=$(jq '.assets|length' < release.json ) - - # Fetch artifacts - mkdir -p "$OUTPUT_DIR" - pushd "$OUTPUT_DIR" > /dev/null - - echo "Fetching assets..." - iter=1 - for id in "${ids[@]}" - do - echo " - $iter/$count: downloading asset id: $id..."
- curl -s -OJ -L -H "Accept: application/octet-stream" \ - -H "Authorization: Token ${GITHUB_TOKEN}" \ - "https://api.github.com/repos/${REPO}/releases/assets/$id" - iter=$((iter + 1)) - done - - pwd - ls -al --color - popd > /dev/null -} - -# Check the checksum for a given binary -function check_sha256() { - echo "Checking SHA256 for $1" - shasum -qc $1.sha256 -} - -# Import GPG keys of the release team members -# This is done in parallel as it can take a while sometimes -function import_gpg_keys() { - GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"} - SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798" - WILL="2835EAF92072BC01D188AF2C4A092B93E97CE1E2" - EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" - MARA="533C920F40E73A21EEB7E9EBF27AEA7E7594C9CF" - MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" - - echo "Importing GPG keys from $GPG_KEYSERVER in parallel" - for key in $SEC $WILL $EGOR $MARA $MORGAN; do - ( - echo "Importing GPG key $key" - gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key - echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key $key trust; - ) & - done - wait -} - -# Check the GPG signature for a given binary -function check_gpg() { - echo "Checking GPG Signature for $1" - gpg --no-tty --verify -q $1.asc $1 -} - -# GITHUB_REF will typically be like: -# - refs/heads/release-v1.2.3 -# - refs/heads/release-polkadot-v1.2.3-rc2 -# This function extracts the version -function get_version_from_ghref() { - GITHUB_REF=$1 - stripped=${GITHUB_REF#refs/heads/release-} - re="v([0-9]+\.[0-9]+\.[0-9]+)" - if [[ $stripped =~ $re ]]; then - echo ${BASH_REMATCH[0]}; - return 0 - else - return 1 - fi -} - -# Get latest rc tag based on the release version and product -function get_latest_rc_tag() { - version=$1 - product=$2 - - if [[ "$product" == "polkadot" ]]; then - last_rc=$(git tag -l "$version-rc*" | sort -V | tail -n 1) - elif [[ "$product" == "polkadot-parachain" ]]; then - last_rc=$(git tag -l "polkadot-parachains-$version-rc*" | sort -V | tail -n 1) - fi - echo "${last_rc}" -} - -# Increment rc tag number based on the value of a suffix of the current rc tag -function increment_rc_tag() { - last_rc=$1 - - suffix=$(echo "$last_rc" | grep -Eo '[0-9]+$') - ((suffix++)) - echo $suffix -} - -function relative_parent() { - echo "$1" | sed -E 's/(.*)\/(.*)\/\.\./\1/g' -} - -# Find all the runtimes, it returns the result as JSON object, compatible to be -# used as Github Workflow Matrix. This call is exposed by the `scan` command and can be used as: -# podman run --rm -it -v /.../fellowship-runtimes:/build docker.io/chevdor/srtool:1.70.0-0.11.1 scan -function find_runtimes() { - libs=($(git grep -I -r --cached --max-depth 20 --files-with-matches 'construct_runtime!' -- '*lib.rs')) - re=".*-runtime$" - JSON=$(jq --null-input '{ "include": [] }') - - # EXCLUDED_RUNTIMES is a space separated list of runtime names (without the -runtime postfix) - # EXCLUDED_RUNTIMES=${EXCLUDED_RUNTIMES:-"substrate-test"} - IFS=' ' read -r -a exclusions <<< "$EXCLUDED_RUNTIMES" - - for lib in "${libs[@]}"; do - crate_dir=$(dirname "$lib") - cargo_toml="$crate_dir/../Cargo.toml" - - name=$(toml get -r $cargo_toml 'package.name') - chain=${name//-runtime/} - - if [[ "$name" =~ $re ]] && ! 
[[ ${exclusions[@]} =~ $chain ]]; then - lib_dir=$(dirname "$lib") - runtime_dir=$(relative_parent "$lib_dir/..") - ITEM=$(jq --null-input \ - --arg chain "$chain" \ - --arg name "$name" \ - --arg runtime_dir "$runtime_dir" \ - '{ "chain": $chain, "crate": $name, "runtime_dir": $runtime_dir }') - JSON=$(echo $JSON | jq ".include += [$ITEM]") - fi - done - echo $JSON -}
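For reference, CI consumed this helper library by sourcing it and setting the environment variables each function documents; a minimal sketch, patterned after the check-runtimes workflow later in this diff (all values are illustrative placeholders):

    . ./.github/scripts/common/lib.sh
    export REPO="paritytech/polkadot-sdk"
    export RELEASE_ID="<numeric-release-id>"        # illustrative placeholder
    export BINARY="polkadot"                        # used for the default OUTPUT_DIR
    export GITHUB_TOKEN="<token-with-read-access>"  # illustrative placeholder
    fetch_release_artifacts                         # downloads all release assets into ./release-artifacts/polkadot
    latest_release "paritytech/polkadot"            # prints the latest release tag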
diff --git a/.github/workflows/auto-label-issues.yml b/.github/workflows/auto-label-issues.yml deleted file mode 100644 index 12ffce702cdc..000000000000 --- a/.github/workflows/auto-label-issues.yml +++ /dev/null @@ -1,17 +0,0 @@ -# If the author of the issue is not a contributor to the project, label -# the issue with 'I10-unconfirmed' - -name: Label New Issues -on: - issues: - types: [opened] - -jobs: - label-new-issues: - runs-on: ubuntu-latest - steps: - - name: Label new issues - uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 - if: github.event.issue.author_association == 'NONE' - with: - add-labels: "I10-unconfirmed" diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml deleted file mode 100644 index 680a9ecffd31..000000000000 --- a/.github/workflows/build-and-attach-release-runtimes.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: Build and Attach Runtimes to Releases/RC - -on: - release: - types: - - published - -env: - PROFILE: production - -jobs: - build_and_upload: - strategy: - matrix: - runtime: - - { name: westend, package: westend-runtime, path: polkadot/runtime/westend } - - { name: rococo, package: rococo-runtime, path: polkadot/runtime/rococo } - - { name: asset-hub-rococo, package: asset-hub-rococo-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-rococo } - - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend } - - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo } - - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo } - - { name: collectives-westend, package: collectives-westend-runtime, path: cumulus/parachains/runtimes/collectives/collectives-westend } - - { name: glutton-westend, package: glutton-westend-runtime, path: cumulus/parachains/runtimes/glutton/glutton-westend } - build_config: - # Release build has logging disabled and no dev features - - { type: on-chain-release, opts: --features on-chain-release-build } - # Debug build has logging enabled and developer features - - { type: dev-debug-build, opts: --features try-runtime } - - runs-on: ubuntu-22.04 - - steps: - - name: Checkout code - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Build ${{ matrix.runtime.name }} ${{ matrix.build_config.type }} - id: srtool_build - uses: chevdor/srtool-actions@v0.9.2 - env: - BUILD_OPTS: ${{ matrix.build_config.opts }} - with: - chain: ${{ matrix.runtime.name }} - package: ${{ matrix.runtime.package }} - runtime_dir: ${{ matrix.runtime.path }} - profile: ${{ env.PROFILE }} - - - name: Set up paths and runtime names - id: setup - run: | - RUNTIME_BLOB_NAME=$(echo ${{ matrix.runtime.package }} | sed 's/-/_/g').compact.compressed.wasm - PREFIX=${{ matrix.build_config.type == 'dev-debug-build' && 'DEV_DEBUG_BUILD__' || '' }} - - echo "RUNTIME_BLOB_NAME=$RUNTIME_BLOB_NAME" >> $GITHUB_ENV - echo "ASSET_PATH=./${{ matrix.runtime.path }}/target/srtool/${{ env.PROFILE }}/wbuild/${{ matrix.runtime.package }}/$RUNTIME_BLOB_NAME" >> $GITHUB_ENV - echo "ASSET_NAME=$PREFIX$RUNTIME_BLOB_NAME" >> $GITHUB_ENV - - - name: Upload Runtime to Release - uses: actions/upload-release-asset@v1 - with: - upload_url: ${{ github.event.release.upload_url }} - asset_path: ${{ env.ASSET_PATH }} - asset_name: ${{ env.ASSET_NAME }} - asset_content_type: application/octet-stream - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/check-features.yml b/.github/workflows/check-features.yml deleted file mode 100644 index 53d6ac6b4dbf..000000000000 --- a/.github/workflows/check-features.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Check Features - -on: - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - -jobs: - check-features: - runs-on: ubuntu-latest - steps: - - name: Fetch latest code - uses: actions/checkout@v4 - - name: Check - uses: hack-ink/cargo-featalign-action@bea88a864d6ca7d0c53c26f1391ce1d431dc7f34 # v0.1.1 - with: - crate: substrate/bin/node/runtime - features: std,runtime-benchmarks,try-runtime - ignore: sc-executor - default-std: true diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml deleted file mode 100644 index 97562f0da095..000000000000 --- a/.github/workflows/check-labels.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: Check labels - -on: - pull_request: - types: [labeled, opened, synchronize, unlabeled] - merge_group: - -jobs: - check-labels: - runs-on: ubuntu-latest - steps: - - name: Skip merge queue - if: ${{ contains(github.ref, 'gh-readonly-queue') }} - run: exit 0 - - name: Pull image - env: - IMAGE: paritytech/ruled_labels:0.4.0 - run: docker pull $IMAGE - - - name: Check labels - env: - IMAGE: paritytech/ruled_labels:0.4.0 - MOUNT: /work - GITHUB_PR: ${{ github.event.pull_request.number }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - API_BASE: https://api.github.com/repos - REPO: ${{ github.repository }} - RULES_PATH: labels/ruled_labels - CHECK_SPECS: "specs_polkadot-sdk.yaml" - run: | - echo "REPO: ${REPO}" - echo "GITHUB_PR: ${GITHUB_PR}" - - # Clone repo with labels specs - echo "Cloning repo with labels specs" - - # Temporary, until https://github.com/paritytech/labels/pull/29 is merged - git clone https://github.com/paritytech/labels - - # Fetch the labels for the PR under test - echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}" - labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",") - echo "Labels: ${labels}" - - if [ -z "${labels}" ]; then - echo "No labels found, checking without them" - docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --no-label - fi - - labels_args=${labels: :-1} - printf "Checking labels: %s\n" "${labels_args}" - - # Prevent the shell from splitting labels with spaces - IFS="," - - # --dev enables a more verbose debug mode, useful for debugging - docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags PR diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml deleted file mode 100644 index e1e92d288cea..000000000000 --- a/.github/workflows/check-licenses.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Check licenses - -on: - pull_request: - merge_group: - -permissions: - packages: read - -jobs: - check-licenses: - runs-on: ubuntu-latest - env: - LICENSES: "'Apache-2.0' 'GPL-3.0-only' 'GPL-3.0-or-later WITH
Classpath-exception-2.0'" - NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/setup-node@v4.0.1 - with: - node-version: "18.x" - registry-url: "https://npm.pkg.github.com" - scope: "@paritytech" - - - name: Check the licenses in Polkadot - run: | - shopt -s globstar - npx @paritytech/license-scanner scan \ - --ensure-licenses ${{ env.LICENSES }} \ - -- ./polkadot/**/*.rs - - - name: Check the licenses in Cumulus - run: | - shopt -s globstar - npx @paritytech/license-scanner scan \ - --ensure-licenses ${{ env.LICENSES }} \ - --exclude ./cumulus/parachain-template \ - -- ./cumulus/**/*.rs - - - name: Check the licenses in Substrate - run: | - shopt -s globstar - npx @paritytech/license-scanner scan \ - --ensure-licenses ${{ env.LICENSES }} \ - --exclude ./substrate/bin/node-template \ - -- ./substrate/**/*.rs diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml deleted file mode 100644 index 903d7a3fcb3d..000000000000 --- a/.github/workflows/check-links.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Check links - -on: - pull_request: - paths: - - "*.rs" - - "*.prdoc" - - ".github/workflows/check-links.yml" - - ".config/lychee.toml" - types: [opened, synchronize, reopened, ready_for_review] - merge_group: - -permissions: - packages: read - -jobs: - link-checker: - runs-on: ubuntu-latest - steps: - - name: Restore lychee cache - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.2 (7. Sep 2023) - with: - path: .lycheecache - key: cache-lychee-${{ github.sha }} - # This should restore from the most recent one: - restore-keys: cache-lychee- - - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023) - - - name: Lychee link checker - uses: lycheeverse/lychee-action@c3089c702fbb949e3f7a8122be0c33c017904f9b # for v1.9.1 (10. Jan 2024) - with: - args: >- - --config .config/lychee.toml - --no-progress - './**/*.rs' - './**/*.prdoc' - fail: true - env: - # To bypass GitHub rate-limit: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml deleted file mode 100644 index 2b8a66db35b3..000000000000 --- a/.github/workflows/check-markdown.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Check Markdown - -on: - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: - -permissions: - packages: read - -jobs: - lint-markdown: - runs-on: ubuntu-latest - - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - uses: actions/setup-node@v4.0.1 - with: - node-version: "18.x" - registry-url: "https://npm.pkg.github.com" - scope: "@paritytech" - - - name: Install tooling - run: | - npm install -g markdownlint-cli - markdownlint --version - - - name: Check Markdown - env: - CONFIG: .github/.markdownlint.yaml - run: | - echo "Checking markdown formatting. More info: docs/contributor/markdown_linting.md" - markdownlint --config "$CONFIG" --ignore target . 
diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml deleted file mode 100644 index f47404744a49..000000000000 --- a/.github/workflows/check-prdoc.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: Check PRdoc - -on: - pull_request: - types: [labeled, opened, synchronize, unlabeled] - merge_group: - -env: - IMAGE: docker.io/paritytech/prdoc:v0.0.7 - API_BASE: https://api.github.com/repos - REPO: ${{ github.repository }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GITHUB_PR: ${{ github.event.pull_request.number }} - ENGINE: docker - PRDOC_DOC: https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/prdoc.md - -jobs: - check-prdoc: - runs-on: ubuntu-latest - steps: - # we cannot show the version in this step (ie before checking out the repo) - # due to https://github.com/paritytech/prdoc/issues/15 - - name: Skip merge queue - if: ${{ contains(github.ref, 'gh-readonly-queue') }} - run: exit 0 - - name: Pull image - run: | - echo "Pulling $IMAGE" - $ENGINE pull $IMAGE - - - name: Check if PRdoc is required - id: get-labels - run: | - # Fetch the labels for the PR under test - echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}" - labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",") - echo "Labels: ${labels}" - echo "labels=${labels}" >> "$GITHUB_OUTPUT" - - - name: Checkout repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1 - - - name: Check PRDoc version - run: | - $ENGINE run --rm -v $PWD:/repo $IMAGE --version - - - name: Early exit if PR is silent - if: ${{ contains(steps.get-labels.outputs.labels, 'R0') }} - run: | - hits=$(find prdoc -name "pr_$GITHUB_PR*.prdoc" | wc -l) - if (( hits > 0 )); then - echo "PR detected as silent, but a PRDoc was found, checking it as information" - $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} || echo "Ignoring failure" - else - echo "PR detected as silent, no PRDoc found, exiting..." - fi - echo "If you want to add a PRDoc, please refer to $PRDOC_DOC" - exit 0 - - - name: PRdoc check for PR#${{ github.event.pull_request.number }} - if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} - run: | - echo "Checking for PR#${GITHUB_PR}" - echo "You can find more information about PRDoc at $PRDOC_DOC" - $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} diff --git a/.github/workflows/check-publish.yml b/.github/workflows/check-publish.yml deleted file mode 100644 index b16b3d4e5c5c..000000000000 --- a/.github/workflows/check-publish.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Check publish - -on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: - -jobs: - check-publish: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Rust Cache - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - with: - cache-on-failure: true - - - name: install parity-publish - run: cargo install parity-publish@0.3.0 - - - name: parity-publish check - run: parity-publish check --allow-unpublished diff --git a/.github/workflows/check-runtimes.yml b/.github/workflows/check-runtimes.yml deleted file mode 100644 index 0e5ad104766a..000000000000 --- a/.github/workflows/check-runtimes.yml +++ /dev/null @@ -1,94 +0,0 @@ -name: Check Runtimes Specs -# This GH Workflow fetches the runtimes available in a release. 
-# It then compares their metadata with reference specs located under -# .github/runtime_specs. - -on: - workflow_dispatch: - inputs: - release_id: - description: | - Release ID. - You can find it using the command: - curl -s \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/paritytech/polkadot-sdk/releases | \ - jq '.[] | { name: .name, id: .id }' - required: true - type: string - - # This trigger unfortunately does not work as expected. - # https://github.com/orgs/community/discussions/47794 - # release: - # types: [edited] - -env: - RUNTIME_SPECS_DIR: .github/runtime_specs - DATA_DIR: runtimes - RELEASE_ID: ${{ inputs.release_id }} - REPO: ${{ github.repository }} - -jobs: - find-specs: - name: Fetch runtime specs - outputs: - specs: ${{ steps.get-list.outputs.specs }} - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Get list - id: get-list - run: | - lst=$(ls $RUNTIME_SPECS_DIR/*.json | xargs -I{} basename "{}" .json | jq -R .| jq -sc .) - echo "Found: $lst" - echo "specs=$lst" >> $GITHUB_OUTPUT - - check-runtimes: - name: Check runtime specs - runs-on: ubuntu-latest - needs: - - find-specs - - strategy: - matrix: - specs: ${{ fromJSON(needs.find-specs.outputs.specs) }} - - steps: - - name: Checkout the repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Fetch release artifacts based on release id - env: - OUTPUT_DIR: ${{ env.DATA_DIR }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - . ./.github/scripts/common/lib.sh - fetch_release_artifacts - - - name: Install tooling - env: - SUBWASM_VERSION: v0.20.0 - DL_BASE_URL: https://github.com/chevdor/subwasm/releases/download - run: | - wget $DL_BASE_URL/$SUBWASM_VERSION/subwasm_linux_amd64_$SUBWASM_VERSION.deb \ - -O subwasm.deb - sudo dpkg -i subwasm.deb - subwasm --version - - - name: Extract metadata JSON for ${{ matrix.specs }} - env: - RUNTIME: ${{ matrix.specs }} - run: | - WASM=$(ls ${DATA_DIR}/${RUNTIME}*.wasm) - echo "WASM=$WASM" - subwasm show --json "$WASM" > "${DATA_DIR}/${RUNTIME}.json" - - - name: Check specs for ${{ matrix.specs }} - id: build - env: - RUNTIME: ${{ matrix.specs }} - LOGLEVEL: info - run: | - python --version - .github/scripts/check-runtime.py "${DATA_DIR}/${RUNTIME}.json" "${RUNTIME_SPECS_DIR}/${RUNTIME}.json" diff --git a/.github/workflows/check-workspace.yml b/.github/workflows/check-workspace.yml deleted file mode 100644 index 3dd812d7d9b3..000000000000 --- a/.github/workflows/check-workspace.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Check workspace - -on: - pull_request: - paths: - - "*.toml" - merge_group: - -jobs: - check-workspace: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023) - - - name: install python deps - run: pip3 install toml - - - name: check integrity - run: > - python3 .github/scripts/check-workspace.py . 
- --exclude - "substrate/frame/contracts/fixtures/build" - "substrate/frame/contracts/fixtures/contracts/common" diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml deleted file mode 100644 index f3df0bce72d5..000000000000 --- a/.github/workflows/claim-crates.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Claim Crates - -on: - push: - branches: - - master - -jobs: - claim-crates: - runs-on: ubuntu-latest - environment: master - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Rust Cache - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - with: - cache-on-failure: true - - - name: install parity-publish - run: cargo install parity-publish@0.3.0 - - - name: parity-publish claim - env: - PARITY_PUBLISH_CRATESIO_TOKEN: ${{ secrets.CRATESIO_PUBLISH_CLAIM_TOKEN }} - run: parity-publish claim diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml deleted file mode 100644 index efcf278c46e8..000000000000 --- a/.github/workflows/fmt-check.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Rustfmt check - -on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: - -jobs: - quick_check: - strategy: - matrix: - os: ["ubuntu-latest"] - runs-on: ${{ matrix.os }} - container: - image: docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109 - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Cargo fmt - run: cargo +nightly fmt --all -- --check diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml deleted file mode 100644 index b338f7a3f625..000000000000 --- a/.github/workflows/gitspiegel-trigger.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: gitspiegel sync - -# This workflow doesn't do anything; its only use is to trigger the "workflow_run" -# webhook, which will be consumed by gitspiegel. -# This way, gitspiegel won't do mirroring unless this workflow runs, -# and running the workflow is protected by GitHub - -on: - pull_request: - types: - - opened - - synchronize - - unlocked - - ready_for_review - - reopened - # the job doesn't check out any code, so it is relatively safe to run it on any event - pull_request_target: - types: - - opened - - synchronize - - unlocked - - ready_for_review - - reopened - merge_group: - -# drop all permissions for GITHUB_TOKEN -permissions: {} - -jobs: - sync: - runs-on: ubuntu-latest - steps: - - name: Do nothing - run: echo "let's go" diff --git a/.github/workflows/merge-queue.yml b/.github/workflows/merge-queue.yml deleted file mode 100644 index cce326f44931..000000000000 --- a/.github/workflows/merge-queue.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: Merge-Queue - -on: - merge_group: - -jobs: - trigger-merge-queue-action: - runs-on: ubuntu-latest - environment: merge-queues - steps: - - name: Generate token - id: app_token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 - with: - app_id: ${{ secrets.REVIEW_APP_ID }} - private_key: ${{ secrets.REVIEW_APP_KEY }} - - name: Add Merge Queue status check - uses: billyjbryant/create-status-check@3e6fa0ac599d10d9588cf9516ca4330ef669b858 # v2 - with: - authToken: ${{ steps.app_token.outputs.token }} - context: 'review-bot' - description: 'PRs for the merge queue get approved' - state: 'success' - sha: ${{ github.event.merge_group.head_commit.id }} diff --git a/.github/workflows/notif-burnin-label.yml
b/.github/workflows/notif-burnin-label.yml deleted file mode 100644 index b630cd07440f..000000000000 --- a/.github/workflows/notif-burnin-label.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: Notify DevOps when burn-in label applied -on: - pull_request: - types: [labeled] - -jobs: - notify-devops: - runs-on: ubuntu-latest - strategy: - matrix: - channel: - - name: 'Team: DevOps' - room: '!lUslSijLMgNcEKcAiE:parity.io' - - steps: - - name: Send Matrix message to ${{ matrix.channel.name }} - if: startsWith(github.event.label.name, 'A0-') - uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 - with: - room_id: ${{ matrix.channel.room }} - access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} - server: m.parity.io - message: | - @room Burn-in request received for [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }}) diff --git a/.github/workflows/parachain.yml b/.github/workflows/parachain.yml new file mode 100644 index 000000000000..e899e3fc35e6 --- /dev/null +++ b/.github/workflows/parachain.yml @@ -0,0 +1,277 @@ +name: bridge + +on: + push: + paths: + - "bridges/snowbridge/**" + - "!bridges/snowbridge/README.md" + - "!bridges/snowbridge/LICENSE" + branches: + - main + pull_request: + workflow_dispatch: + +env: + FUZZ_MAX_LEN: 10000000000 + FUZZ_MAX_RUNS: 30000 + RUST_NIGHTLY: "2023-05-23" + +jobs: + check: + runs-on: snowbridge-runner + env: + CARGO_INCREMENTAL: 0 + RUST_BACKTRACE: 1 + RUSTFLAGS: -C debuginfo=1 + SKIP_WASM_BUILD: 1 + RUST_NIGHTLY_VERSION: nightly-2023-05-23 + steps: + - uses: actions/checkout@v2 + with: + submodules: "true" + - uses: arduino/setup-protoc@v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: actions/cache@v1 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + - uses: actions/setup-node@v4.0.0 + with: + node-version: "18.x" + registry-url: "https://npm.pkg.github.com" + scope: "@paritytech" + - name: setup rust toolchain + run: | + rustup target add wasm32-unknown-unknown + curl -LO https://github.com/paritytech/rustc-rv32e-toolchain/releases/download/v1.1.0/rust-rve-nightly-2024-01-05-x86_64-unknown-linux-gnu.tar.zst + tar -I zstd -xf rust-rve-nightly-2024-01-05-x86_64-unknown-linux-gnu.tar.zst + mv rve-nightly ~/.rustup/toolchains/ + rustup toolchain install nightly + rustup component add rustfmt --toolchain nightly + rustup show + - name: cargo check + run: cargo check --workspace --all-features + - name: clippy + run: cargo clippy --all-features -- -D warnings + - name: format + run: cargo +nightly fmt --all -- --check + - name: install taplo + run: | + cargo install taplo-cli --locked + - name: taplo + run: taplo format --check --config .config/taplo.toml + - name: install zepter + run: | + cargo install zepter -f --locked + - name: zepter + run: zepter run check + - name: lint-markdown + run: | + npm install -g markdownlint-cli + markdownlint --version + markdownlint --config .github/.markdownlint.yaml --ignore target + + test: + needs: check + runs-on: snowbridge-runner + env: + CARGO_INCREMENTAL: 0 + RUST_BACKTRACE: 1 + RUSTFLAGS: -C debuginfo=1 + SKIP_WASM_BUILD: 1 + RUST_MIN_STACK: 8388608 + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.head_ref }} + submodules: "true" + - uses: arduino/setup-protoc@v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - uses: actions/cache@v1 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os 
}}-cargo-${{ hashFiles('Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + - name: setup rust toolchain + run: rustup show + # Increase stack limit for beacon light client tests + - run: sudo prlimit --pid $$ --stack=32768 + # Run tests for runtime-benchmarks feature + - name: Tests for runtime-benchmarks + working-directory: bridges/snowbridge + run: > + cargo test + --workspace + --features runtime-benchmarks + # Run tests for all features + - name: Tests for all features + working-directory: bridges/snowbridge + run: > + cargo test + --workspace + --all-features + + coverage: + needs: check + runs-on: snowbridge-runner + env: + CARGO_INCREMENTAL: 0 + RUST_BACKTRACE: 1 + RUSTFLAGS: -C debuginfo=1 + SKIP_WASM_BUILD: 1 + RUST_MIN_STACK: 8388608 + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.head_ref }} + submodules: "true" + - uses: arduino/setup-protoc@v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: setup rust toolchain + run: rustup show + - name: run coverage test + working-directory: bridges/snowbridge + run: > + cargo install cargo-tarpaulin@0.27.0 && + cargo tarpaulin + --workspace + --engine llvm + --out xml + - name: Upload coverage reports to Codecov with GitHub Action + uses: codecov/codecov-action@v3 + with: + working-directory: bridges/snowbridge + files: cobertura.xml + flags: rust + + check-cumulus: + runs-on: snowbridge-runner + steps: + - uses: actions/checkout@v2 + with: + submodules: "true" + - uses: arduino/setup-protoc@v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: setup rust toolchain + run: rustup show + - name: check bridge-hub fast-runtime + run: > + cargo check + --release --verbose + --package bridge-hub-rococo-runtime + --features fast-runtime + - name: check bridge-hub runtime-benchmarks + run: > + cargo check + --release --verbose + --package bridge-hub-rococo-runtime + --features runtime-benchmarks + - name: check bridge-hub try-runtime + run: > + cargo check + --release --verbose + --package bridge-hub-rococo-runtime + --features try-runtime + - name: check bridge-hub all features + run: > + cargo check + --release --verbose + --package bridge-hub-rococo-runtime + --all-features + - name: check asset-hub all features + run: > + cargo check + --release --verbose + --package asset-hub-rococo-runtime + --all-features + + runtime-tests: + needs: check + runs-on: snowbridge-runner + steps: + - uses: actions/checkout@v2 + with: + submodules: "true" + - uses: arduino/setup-protoc@v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: setup rust toolchain + run: rustup show + - name: snowbridge runtime tests + run: > + RUST_LOG=xcm=trace cargo test + --package bridge-hub-rococo-runtime + --test snowbridge + -- --nocapture + + integration-tests: + needs: check + runs-on: snowbridge-runner + steps: + - uses: actions/checkout@v2 + with: + submodules: "true" + - uses: arduino/setup-protoc@v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: setup rust toolchain + run: rustup show + - name: bridge-hub and asset-hub integration tests + run: > + RUST_LOG=xcm=trace cargo test + -p bridge-hub-rococo-integration-tests + -p asset-hub-rococo-integration-tests + -- --nocapture + + beacon-fuzz: + if: false + needs: test + runs-on: snowbridge-runner + env: + CARGO_INCREMENTAL: 0 + RUST_BACKTRACE: 1 + RUSTFLAGS: -C debuginfo=1 + SKIP_WASM_BUILD: 1 + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.head_ref }} + submodules: "true" + - uses: actions/cache@v1 + with: + path: | + 
~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + - name: install nightly + run: rustup install --profile minimal nightly-$RUST_NIGHTLY + - name: Install cargo-fuzz from crates.io + uses: baptiste0928/cargo-install@v2 + with: + crate: cargo-fuzz + version: "^0.11.2" + - name: Fuzz force checkpoint extrinsic + run: > + cd bridges/snowbridge/pallets/ethereum-beacon-client && cargo +nightly-$RUST_NIGHTLY fuzz run fuzz_force_checkpoint -- + -max_len=$FUZZ_MAX_LEN -runs=$FUZZ_MAX_RUNS + - name: Fuzz submit extrinsic + run: > + cd bridges/snowbridge/pallets/ethereum-beacon-client && cargo +nightly-$RUST_NIGHTLY fuzz run fuzz_submit -- + -max_len=$FUZZ_MAX_LEN -runs=$FUZZ_MAX_RUNS + - name: Fuzz submit execution header extrinsic + run: > + cd bridges/snowbridge/pallets/ethereum-beacon-client && cargo +nightly-$RUST_NIGHTLY fuzz run fuzz_submit_execution_header -- + -max_len=$FUZZ_MAX_LEN -runs=$FUZZ_MAX_RUNS diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-10_rc-automation.yml deleted file mode 100644 index 7231a8b75886..000000000000 --- a/.github/workflows/release-10_rc-automation.yml +++ /dev/null @@ -1,113 +0,0 @@ -name: Release - RC automation -on: - push: - branches: - # Catches release-polkadot-v1.2.3, release-v1.2.3-rc1, etc - - release-v[0-9]+.[0-9]+.[0-9]+* - - release-cumulus-v[0-9]+* - - release-polkadot-v[0-9]+* - - workflow_dispatch: - -jobs: - tag_rc: - runs-on: ubuntu-latest - strategy: - matrix: - channel: - - name: "RelEng: Polkadot Release Coordination" - room: '!cqAmzdIcbOFwrdrubV:parity.io' - environment: release - - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - fetch-depth: 0 - - - name: Get release product - id: get_rel_product - shell: bash - run: | - current_branch=$(git branch --show-current) - echo "Current branch: $current_branch" - if [[ "$current_branch" =~ "release-polkadot" ]]; then - echo "product=polkadot" >> $GITHUB_OUTPUT - elif [[ "$current_branch" =~ "release-cumulus" ]]; then - echo "product=polkadot-parachain" >> $GITHUB_OUTPUT - fi - - - - name: Compute next rc tag for polkadot - if: ${{ steps.get_rel_product.outputs.product == 'polkadot' }} - id: compute_tag_polkadot - shell: bash - run: | - . ./.github/scripts/common/lib.sh - - # Get last rc tag if exists, else set it to {version}-rc1 - version=$(get_version_from_ghref ${GITHUB_REF}) - echo "$version" - echo "version=$version" >> $GITHUB_OUTPUT - - last_rc=$(get_latest_rc_tag $version polkadot) - - if [ -n "$last_rc" ]; then - suffix=$(increment_rc_tag $last_rc) - echo "new_tag=$version-rc$suffix" >> $GITHUB_OUTPUT - echo "first_rc=false" >> $GITHUB_OUTPUT - else - echo "new_tag=$version-rc1" >> $GITHUB_OUTPUT - echo "first_rc=true" >> $GITHUB_OUTPUT - fi - - - name: Compute next rc tag for polkadot-parachain - if: ${{ steps.get_rel_product.outputs.product == 'polkadot-parachain' }} - id: compute_tag_cumulus - shell: bash - run: | - . 
./.github/scripts/common/lib.sh - - # Get last rc tag if exists, else set it to polkadot-parachains-{version}-rc1 - version=$(get_version_from_ghref ${GITHUB_REF}) - echo "$version" - echo "version=$version" >> $GITHUB_OUTPUT - - last_rc=$(get_latest_rc_tag $version polkadot-parachain) - if [ -n "$last_rc" ]; then - suffix=$(increment_rc_tag $last_rc) - echo "new_tag=polkadot-parachains-$version-rc$suffix" >> $GITHUB_OUTPUT - echo "first_rc=false" >> $GITHUB_OUTPUT - else - echo "new_tag=polkadot-parachain-$version-rc1" >> $GITHUB_OUTPUT - echo "first_rc=true" >> $GITHUB_OUTPUT - fi - - - name: Apply new tag - uses: tvdias/github-tagger@ed7350546e3e503b5e942dffd65bc8751a95e49d # v0.0.2 - with: - # We can't use the normal GITHUB_TOKEN for the following reason: - # https://docs.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token - # RELEASE_BRANCH_TOKEN requires public_repo OAuth scope - repo-token: "${{ secrets.RELEASE_BRANCH_TOKEN }}" - tag: ${{ steps.compute_tag_polkadot.outputs.new_tag || steps.compute_tag_cumulus.outputs.new_tag }} - - # - id: create-issue - # uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1 - # # Only create the issue if it's the first release candidate - # if: steps.compute_tag.outputs.first_rc == 'true' - # env: - # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # VERSION: ${{ steps.compute_tag.outputs.version }} - # with: - # filename: .github/ISSUE_TEMPLATE/release.md - - - name: Send Matrix message to ${{ matrix.channel.name }} - uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 - # if: steps.create-issue.outputs.url != '' - with: - room_id: ${{ matrix.channel.room }} - access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} - server: m.parity.io - message: | - Release process for polkadot ${{ steps.compute_tag_polkadot.outputs.new_tag || steps.compute_tag_cumulus.outputs.new_tag }} has been started.
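The two `compute_tag_*` steps above push the actual tag parsing and rc-suffix arithmetic into helpers from `.github/scripts/common/lib.sh` (`get_version_from_ghref`, `get_latest_rc_tag`, `increment_rc_tag`), which are not part of this diff. A minimal sketch of the increment step, assuming tags of the form `<version>-rcN`; the real helper may differ:

```sh
# Hypothetical sketch of increment_rc_tag; the actual implementation lives in
# .github/scripts/common/lib.sh and is not shown in this diff.
increment_rc_tag() {
    local last_rc="$1"               # e.g. "v1.5.0-rc2"
    local suffix="${last_rc##*-rc}"  # strip everything up to "-rc" -> "2"
    echo $((suffix + 1))             # prints "3"; the caller emits "$version-rc3"
}
```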
diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml deleted file mode 100644 index ecbac01cd3a5..000000000000 --- a/.github/workflows/release-50_publish-docker.yml +++ /dev/null @@ -1,290 +0,0 @@ -name: Release - Publish Docker Image - -# This workflow listens to published releases or can be triggered manually. -# It builds and published releases and rc candidates. - -on: - #TODO: activate automated run later - # release: - # types: - # - published - workflow_dispatch: - inputs: - image_type: - description: Type of the image to be published - required: true - default: rc - type: choice - options: - - rc - - release - - binary: - description: Binary to be published - required: true - default: polkadot - type: choice - options: - - polkadot - - polkadot-parachain - - release_id: - description: | - Release ID. - You can find it using the command: - curl -s \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ - jq '.[] | { name: .name, id: .id }' - required: true - type: string - - registry: - description: Container registry - required: true - type: string - default: docker.io - - # The owner is often the same than the Docker Hub username but does ont have to be. - # In our case, it is not. - owner: - description: Owner of the container image repo - required: true - type: string - default: parity - - version: - description: version to build/release - default: v0.9.18 - required: true - -permissions: - contents: write - -env: - RELEASE_ID: ${{ inputs.release_id }} - ENGINE: docker - REGISTRY: ${{ inputs.registry }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }} - REPO: ${{ github.repository }} - BINARY: ${{ inputs.binary }} - # EVENT_ACTION: ${{ github.event.action }} - EVENT_NAME: ${{ github.event_name }} - IMAGE_TYPE: ${{ inputs.image_type }} - -jobs: - fetch-artifacts: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.image_type == 'rc' }} - runs-on: ubuntu-latest - - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - #TODO: this step will be needed when automated triggering will work - #this step runs only if the workflow is triggered automatically when new release is published - # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} - # run: | - # mkdir -p release-artifacts && cd release-artifacts - - # for f in $BINARY $BINARY.asc $BINARY.sha256; do - # URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" - # echo " - Fetching $f from $URL" - # wget "$URL" -O "$f" - # done - # chmod a+x $BINARY - # ls -al - - - name: Fetch rc artifacts or release artifacts based on release id - #this step runs only if the workflow is triggered manually - if: ${{ env.EVENT_NAME == 'workflow_dispatch' }} - run: | - . 
./.github/scripts/common/lib.sh - - fetch_release_artifacts - - - name: Cache the artifacts - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - key: artifacts-${{ env.BINARY }}-${{ github.sha }} - path: | - ./release-artifacts/${{ env.BINARY }}/**/* - - build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.image_type == 'rc' }} - runs-on: ubuntu-latest - needs: fetch-artifacts - environment: release - - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Get artifacts from cache - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - key: artifacts-${{ env.BINARY }}-${{ github.sha }} - fail-on-cache-miss: true - path: | - ./release-artifacts/${{ env.BINARY }}/**/* - - - name: Check sha256 ${{ env.BINARY }} - working-directory: ./release-artifacts/${{ env.BINARY }} - run: | - . ../../.github/scripts/common/lib.sh - - echo "Checking binary $BINARY" - check_sha256 $BINARY && echo "OK" || echo "ERR" - - - name: Check GPG ${{ env.BINARY }} - working-directory: ./release-artifacts/${{ env.BINARY }} - run: | - . ../../.github/scripts/common/lib.sh - import_gpg_keys - check_gpg $BINARY - - - name: Fetch rc commit and tag - if: ${{ env.IMAGE_TYPE == 'rc' }} - id: fetch_rc_refs - run: | - release=release-${{ inputs.release_id }} && \ - echo "release=${release}" >> $GITHUB_OUTPUT - - commit=$(git rev-parse --short HEAD) && \ - echo "commit=${commit}" >> $GITHUB_OUTPUT - - tag=$(git name-rev --tags --name-only $(git rev-parse HEAD)) && \ - [ "${tag}" != "undefined" ] && echo "tag=${tag}" >> $GITHUB_OUTPUT || \ - echo "No tag, doing without" - - - name: Fetch release tags - working-directory: ./release-artifacts/${{ env.BINARY }} - if: ${{ env.IMAGE_TYPE == 'release'}} - id: fetch_release_refs - run: | - chmod a+rx $BINARY - VERSION=$(./$BINARY --version | awk '{ print $2 }' ) - release=$( echo $VERSION | cut -f1 -d- ) - echo "tag=latest" >> $GITHUB_OUTPUT - echo "release=${release}" >> $GITHUB_OUTPUT - - - name: Build Injected Container image for polkadot rc - if: ${{ env.BINARY == 'polkadot' }} - env: - ARTIFACTS_FOLDER: ./release-artifacts - IMAGE_NAME: ${{ env.BINARY }} - OWNER: ${{ env.DOCKER_OWNER }} - TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} - run: | - ls -al - echo "Building container for $BINARY" - ./docker/scripts/build-injected.sh - - - name: Build Injected Container image for polkadot-parachain - if: ${{ env.BINARY == 'polkadot-parachain' }} - env: - ARTIFACTS_FOLDER: ./release-artifacts - IMAGE_NAME: ${{ env.BINARY }} - OWNER: ${{ env.DOCKER_OWNER }} - DOCKERFILE: docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile - TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} - run: | - ls -al - mkdir -p $ARTIFACTS_FOLDER/specs - cp cumulus/parachains/chain-specs/*.json $ARTIFACTS_FOLDER/specs - - echo "Building container for $BINARY" - ./docker/scripts/build-injected.sh - - - name: Login to Dockerhub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - with: - username: ${{ secrets.CUMULUS_DOCKERHUB_USERNAME }} - password: ${{ secrets.CUMULUS_DOCKERHUB_TOKEN }} - - - name: Push Container image for ${{ env.BINARY }} - id: docker_push - run: | - $ENGINE images | grep ${BINARY} - $ENGINE push 
--all-tags ${REGISTRY}/${DOCKER_OWNER}/${BINARY} - - - name: Check version for the published image for ${{ env.BINARY }} - env: - RELEASE_TAG: ${{ steps.fetch_rc_refs.outputs.release || steps.fetch_release_refs.outputs.release }} - run: | - echo "Checking tag ${RELEASE_TAG} for image ${REGISTRY}/${DOCKER_OWNER}/${BINARY}" - $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version - - fetch-latest-debian-package-version: # this job will be triggered for polkadot release build - if: ${{ inputs.binary == 'polkadot' && inputs.image_type == 'release' }} - runs-on: ubuntu-latest - outputs: - polkadot_apt_version: ${{ steps.fetch-latest-apt.outputs.polkadot_apt_version }} - polkadot_container_tag: ${{ steps.fetch-latest-apt.outputs.polkadot_container_tag }} - container: - image: paritytech/parity-keyring - options: --user root - steps: - - name: Get version - id: fetch-latest-apt - run: | - apt update - apt show polkadot - version=$(apt show polkadot 2>/dev/null | grep "Version:" | awk '{print $2}') - tag=$(echo $version | sed 's/-.*//') - echo "polkadot_apt_version=v$version" >> $GITHUB_OUTPUT - echo "polkadot_container_tag=v$tag" >> $GITHUB_OUTPUT - echo "You passed ${{ inputs.version }} but this is ignored" - echo "We use the version from the Debian Package: $version" - - build-polkadot-release-container: # this job will be triggered for polkadot release build - if: ${{ inputs.binary == 'polkadot' && inputs.image_type == 'release' }} - runs-on: ubuntu-latest - needs: fetch-latest-debian-package-version - environment: release - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - - - name: Cache Docker layers - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Login to Docker Hub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - with: - username: ${{ secrets.POLKADOT_DOCKERHUB_USERNAME }} - password: ${{ secrets.POLKADOT_DOCKERHUB_TOKEN }} - - - name: Fetch values - id: fetch-data - run: | - date=$(date -u '+%Y-%m-%dT%H:%M:%SZ') - echo "date=$date" >> $GITHUB_OUTPUT - - - name: Build and push - id: docker_build - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 - with: - push: true - file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile - # TODO: The owner should be used below but buildx does not resolve the VARs - # TODO: It would be good to get rid of this GHA that we don't really need. 
- tags: | - parity/polkadot:latest - parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} - build-args: | - VCS_REF=${{ github.ref }} - POLKADOT_VERSION=${{ needs.fetch-latest-debian-package-version.outputs.polkadot_apt_version }} - BUILD_DATE=${{ steps.fetch-data.outputs.date }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/release-99_notif-published.yml b/.github/workflows/release-99_notif-published.yml deleted file mode 100644 index b35120ca4e12..000000000000 --- a/.github/workflows/release-99_notif-published.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Release - Announce release to Matrix rooms -on: - release: - types: - - published - - prereleased - -jobs: - ping_matrix: - runs-on: ubuntu-latest - strategy: - matrix: - channel: - # Internal - - name: 'RelEng: Cumulus Release Coordination' - room: '!NAEMyPAHWOiOQHsvus:parity.io' - pre-releases: true - - name: "RelEng: Polkadot Release Coordination" - room: '!cqAmzdIcbOFwrdrubV:parity.io' - pre-release: true - - name: 'General: Rust, Polkadot, Substrate' - room: '!aJymqQYtCjjqImFLSb:parity.io' - pre-release: false - - name: 'Team: DevOps' - room: '!lUslSijLMgNcEKcAiE:parity.io' - pre-release: true - - # External - - name: 'Ledger <> Polkadot Coordination' - room: '!EoIhaKfGPmFOBrNSHT:web3.foundation' - pre-release: true - - # Public - # - name: '#KusamaValidatorLounge:polkadot.builders' - # room: '!LhjZccBOqFNYKLdmbb:polkadot.builders' - # pre-releases: false - # - name: '#kusama-announcements:matrix.parity.io' - # room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io' - # pre-release: false - # - name: '#polkadotvalidatorlounge:web3.foundation' - # room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io' - # pre-release: false - # - name: '#polkadot-announcements:matrix.parity.io' - # room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io' - # pre-release: false - - steps: - - name: Matrix notification to ${{ matrix.channel.name }} - if: github.event.release.prerelease == false || matrix.channel.pre-release - uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 - with: - room_id: ${{ matrix.channel.room }} - access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} - server: m.parity.io - message: | - A (pre)release has been ${{github.event.action}} in **${{github.event.repository.full_name}}:**
- Release version: [${{github.event.release.tag_name}}](${{github.event.release.html_url}}) - - ----- - - ${{github.event.release.body}} diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml deleted file mode 100644 index 5b036115b238..000000000000 --- a/.github/workflows/review-bot.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Review Bot -on: - workflow_run: - workflows: - - Review-Trigger - types: - - completed - -jobs: - review-approvals: - runs-on: ubuntu-latest - environment: master - steps: - - name: Extract content of artifact - id: number - uses: Bullrich/extract-text-from-artifact@v1.0.0 - with: - artifact-name: pr_number - - name: Generate token - id: app_token - uses: tibdex/github-app-token@v1 - with: - app_id: ${{ secrets.REVIEW_APP_ID }} - private_key: ${{ secrets.REVIEW_APP_KEY }} - - name: "Evaluates PR reviews and assigns reviewers" - uses: paritytech/review-bot@v2.4.0 - with: - repo-token: ${{ steps.app_token.outputs.token }} - team-token: ${{ steps.app_token.outputs.token }} - checks-token: ${{ steps.app_token.outputs.token }} - pr-number: ${{ steps.number.outputs.content }} - request-reviewers: true diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml deleted file mode 100644 index 8b23dd30bb29..000000000000 --- a/.github/workflows/review-trigger.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Review-Trigger - -on: - pull_request_target: - types: - - opened - - reopened - - synchronize - - review_requested - - review_request_removed - - ready_for_review - pull_request_review: - -jobs: - trigger-review-bot: - # (It is not a draft) && (it is not a review || it is an approving review) - if: ${{ github.event.pull_request.draft != true && (github.event_name != 'pull_request_review' || (github.event.review && github.event.review.state == 'APPROVED')) }} - runs-on: ubuntu-latest - name: trigger review bot - steps: - - name: Skip merge queue - if: ${{ contains(github.ref, 'gh-readonly-queue') }} - run: exit 0 - - name: Get PR number - env: - PR_NUMBER: ${{ github.event.pull_request.number }} - run: | - echo "Saving PR number: $PR_NUMBER" - mkdir -p ./pr - echo $PR_NUMBER > ./pr/pr_number - - uses: actions/upload-artifact@v3 - name: Save PR number - with: - name: pr_number - path: pr/ - retention-days: 5 diff --git a/.github/workflows/srtool.yml b/.github/workflows/srtool.yml deleted file mode 100644 index eb15538f559d..000000000000 --- a/.github/workflows/srtool.yml +++ /dev/null @@ -1,135 +0,0 @@ -name: Srtool build - -env: - SUBWASM_VERSION: 0.20.0 - TOML_CLI_VERSION: 0.2.4 - -on: - push: - tags: - - "*" - branches: - - release-v[0-9]+.[0-9]+.[0-9]+* - - release-cumulus-v[0-9]+* - - release-polkadot-v[0-9]+* - - schedule: - - cron: "00 02 * * 1" # 2AM weekly on monday - - workflow_dispatch: - -jobs: - find-runtimes: - name: Scan repo paritytech/polkadot-sdk - outputs: - runtime: ${{ steps.get_runtimes_list.outputs.runtime }} - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - with: - fetch-depth: 0 - - - name: Install tooling - run: | - URL=https://github.com/chevdor/toml-cli/releases/download/v${{ env.TOML_CLI_VERSION }}/toml_linux_amd64_v${{ env.TOML_CLI_VERSION }}.deb - curl -L $URL --output toml.deb - sudo dpkg -i toml.deb - toml --version; jq --version - - - name: Scan runtimes - env: - EXCLUDED_RUNTIMES: "substrate-test" - run: | - . 
./.github/scripts/common/lib.sh - - echo "Github workspace: ${{ github.workspace }}" - echo "Current folder: $(pwd)"; ls -al - ls -al - - MATRIX=$(find_runtimes | tee runtimes_list.json) - echo $MATRIX - - - name: Get runtimes list - id: get_runtimes_list - run: | - ls -al - MATRIX=$(cat runtimes_list.json) - echo $MATRIX - echo "runtime=$MATRIX" >> $GITHUB_OUTPUT - - srtool: - runs-on: ubuntu-latest - needs: - - find-runtimes - strategy: - fail-fast: false - matrix: ${{ fromJSON(needs.find-runtimes.outputs.runtime) }} - - steps: - - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - with: - fetch-depth: 0 - - - name: Srtool build - id: srtool_build - uses: chevdor/srtool-actions@v0.9.2 - with: - chain: ${{ matrix.chain }} - runtime_dir: ${{ matrix.runtime_dir }} - - - name: Summary - run: | - echo '${{ steps.srtool_build.outputs.json }}' | jq > ${{ matrix.chain }}-srtool-digest.json - cat ${{ matrix.chain }}-srtool-digest.json - echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}" - echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}" - - # it takes a while to build the runtime, so let's save the artifact as soon as we have it - - name: Archive Artifacts for ${{ matrix.chain }} - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 - with: - name: ${{ matrix.chain }}-runtime - path: | - ${{ steps.srtool_build.outputs.wasm }} - ${{ steps.srtool_build.outputs.wasm_compressed }} - ${{ matrix.chain }}-srtool-digest.json - - # We now get extra information thanks to subwasm - - name: Install subwasm - run: | - wget https://github.com/chevdor/subwasm/releases/download/v${{ env.SUBWASM_VERSION }}/subwasm_linux_amd64_v${{ env.SUBWASM_VERSION }}.deb - sudo dpkg -i subwasm_linux_amd64_v${{ env.SUBWASM_VERSION }}.deb - subwasm --version - - - name: Show Runtime information - shell: bash - run: | - subwasm info ${{ steps.srtool_build.outputs.wasm }} - subwasm info ${{ steps.srtool_build.outputs.wasm_compressed }} - subwasm --json info ${{ steps.srtool_build.outputs.wasm }} > ${{ matrix.chain }}-info.json - subwasm --json info ${{ steps.srtool_build.outputs.wasm_compressed }} > ${{ matrix.chain }}-compressed-info.json - - - name: Extract the metadata - shell: bash - run: | - subwasm meta ${{ steps.srtool_build.outputs.wasm }} - subwasm --json meta ${{ steps.srtool_build.outputs.wasm }} > ${{ matrix.chain }}-metadata.json - - - name: Check the metadata diff - shell: bash - # the following subwasm call will error for chains that are not known and/or live, that includes shell for instance - run: | - subwasm diff ${{ steps.srtool_build.outputs.wasm }} --chain-b ${{ matrix.chain }} || \ - echo "Subwasm call failed, check the logs. 
This is likely because ${{ matrix.chain }} is not known by subwasm" | \ - tee ${{ matrix.chain }}-diff.txt - - - name: Archive Subwasm results - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 - with: - name: ${{ matrix.chain }}-runtime - path: | - ${{ matrix.chain }}-info.json - ${{ matrix.chain }}-compressed-info.json - ${{ matrix.chain }}-metadata.json - ${{ matrix.chain }}-diff.txt diff --git a/Cargo.lock b/Cargo.lock index 27a1efc38098..13ddeae06263 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17571,7 +17571,7 @@ dependencies = [ [[package]] name = "snowbridge-beacon-primitives" -version = "0.0.0" +version = "0.9.0" dependencies = [ "byte-slice-cast", "frame-support", @@ -17595,7 +17595,7 @@ dependencies = [ [[package]] name = "snowbridge-core" -version = "0.0.0" +version = "0.9.0" dependencies = [ "ethabi-decode", "frame-support", @@ -17618,7 +17618,7 @@ dependencies = [ [[package]] name = "snowbridge-ethereum" -version = "0.1.0" +version = "0.9.0" dependencies = [ "ethabi-decode", "ethbloom", @@ -17657,7 +17657,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-merkle-tree" -version = "0.1.1" +version = "0.9.0" dependencies = [ "array-bytes 4.2.0", "env_logger 0.9.3", @@ -17672,7 +17672,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-runtime-api" -version = "0.0.0" +version = "0.9.0" dependencies = [ "frame-support", "parity-scale-codec", @@ -17686,7 +17686,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-ethereum-client" -version = "0.0.0" +version = "0.9.0" dependencies = [ "bp-runtime", "byte-slice-cast", @@ -17732,7 +17732,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-inbound-queue" -version = "0.0.0" +version = "0.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -17779,7 +17779,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-outbound-queue" -version = "0.0.0" +version = "0.9.0" dependencies = [ "bridge-hub-common", "ethabi-decode", @@ -17804,7 +17804,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-system" -version = "0.0.0" +version = "0.9.0" dependencies = [ "ethabi-decode", "frame-benchmarking", @@ -17832,7 +17832,7 @@ dependencies = [ [[package]] name = "snowbridge-router-primitives" -version = "0.0.0" +version = "0.9.0" dependencies = [ "ethabi-decode", "frame-support", @@ -17855,7 +17855,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-common" -version = "0.0.0" +version = "0.9.0" dependencies = [ "frame-support", "frame-system", @@ -17871,7 +17871,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-test-common" -version = "0.0.0" +version = "0.9.0" dependencies = [ "assets-common", "bridge-hub-test-utils", @@ -17948,7 +17948,7 @@ dependencies = [ [[package]] name = "snowbridge-system-runtime-api" -version = "0.0.0" +version = "0.9.0" dependencies = [ "parity-scale-codec", "snowbridge-core", diff --git a/bridges/snowbridge/.gitignore b/bridges/snowbridge/.gitignore new file mode 100644 index 000000000000..2c1d07caf6fc --- /dev/null +++ b/bridges/snowbridge/.gitignore @@ -0,0 +1,17 @@ +# Generated by Cargo +# will have compiled files and executables +**/target/ +# These are backup files generated by rustfmt +**/*.rs.bk + +*.log +rococo-local-raw.json +rococo-local.json + +.envrc + +# The cache for chain data in container +.local + +# coverage reports +cobertura.xml diff --git a/bridges/snowbridge/Cargo.toml b/bridges/snowbridge/Cargo.toml new file mode 100644 index 000000000000..a9e9ed9abf95 --- /dev/null +++ 
b/bridges/snowbridge/Cargo.toml @@ -0,0 +1,54 @@ +[workspace.package] +authors = ["Snowfork "] +edition = "2021" +repository = "https://github.com/snowfork/polkadot-sdk.git" + +[workspace] +resolver = "2" +members = [ + "pallets/ethereum-client", + "pallets/ethereum-client/fixtures", + "pallets/inbound-queue", + "pallets/inbound-queue/fixtures", + "pallets/outbound-queue", + "pallets/outbound-queue/merkle-tree", + "pallets/outbound-queue/runtime-api", + "pallets/system", + "pallets/system/runtime-api", + "primitives/beacon", + "primitives/core", + "primitives/ethereum", + "primitives/router", + "runtime/runtime-common", + "runtime/test-common", +] + + +[workspace.lints.rust] +suspicious_double_ref_op = { level = "allow", priority = 2 } + +[workspace.lints.clippy] +all = { level = "allow", priority = 0 } +correctness = { level = "warn", priority = 1 } +complexity = { level = "warn", priority = 1 } +if-same-then-else = { level = "allow", priority = 2 } +zero-prefixed-literal = { level = "allow", priority = 2 } # 00_1000_000 +type_complexity = { level = "allow", priority = 2 } # raison d'etre +nonminimal-bool = { level = "allow", priority = 2 } # maybe +borrowed-box = { level = "allow", priority = 2 } # Reasonable to fix this one +too-many-arguments = { level = "allow", priority = 2 } # (Turning this on would lead to) +needless-lifetimes = { level = "allow", priority = 2 } # generated code +unnecessary_cast = { level = "allow", priority = 2 } # Types may change +identity-op = { level = "allow", priority = 2 } # One case where we do 0 + +useless_conversion = { level = "allow", priority = 2 } # Types may change +unit_arg = { level = "allow", priority = 2 } # stylistic +option-map-unit-fn = { level = "allow", priority = 2 } # stylistic +bind_instead_of_map = { level = "allow", priority = 2 } # stylistic +erasing_op = { level = "allow", priority = 2 } # E.g. 0 * DOLLARS +eq_op = { level = "allow", priority = 2 } # In tests we test equality. +while_immutable_condition = { level = "allow", priority = 2 } # false positives +needless_option_as_deref = { level = "allow", priority = 2 } # false positives +derivable_impls = { level = "allow", priority = 2 } # false positives +stable_sort_primitive = { level = "allow", priority = 2 } # prefer stable sort +extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic +default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic diff --git a/bridges/snowbridge/README.md b/bridges/snowbridge/README.md index 49b9c2eaf553..3e5f63098ac9 100644 --- a/bridges/snowbridge/README.md +++ b/bridges/snowbridge/README.md @@ -1,32 +1,40 @@ -# Snowbridge -[![codecov](https://codecov.io/gh/Snowfork/snowbridge/branch/main/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/snowbridge) +# Snowbridge · +[![codecov](https://codecov.io/gh/Snowfork/polkadot-sdk/branch/snowbridge/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/polkadot-sdk) ![GitHub](https://img.shields.io/github/license/Snowfork/snowbridge) Snowbridge is a trustless bridge between Polkadot and Ethereum. For documentation, visit https://docs.snowbridge.network. ## Components +The Snowbridge project lives in two repositories: + +- [Snowfork/polkadot-sdk](https://github.com/Snowfork/polkadot-sdk): The Snowbridge parachain and pallets live in +a fork of the polkadot-sdk. 
Changes are eventually contributed back to +[paritytech/polkadot-sdk](https://github.com/paritytech/polkadot-sdk) +- [Snowfork/snowbridge](https://github.com/Snowfork/snowbridge): The rest of the Snowbridge components, like contracts, +off-chain relayer, end-to-end tests and test-net setup code. + ### Parachain -Polkadot parachain and our pallets. See [parachain/README.md](https://github.com/Snowfork/snowbridge/blob/main/parachain/README.md). +Polkadot parachain and our pallets. See [README.md](https://github.com/Snowfork/polkadot-sdk/blob/snowbridge/bridges/snowbridge/README.md). ### Contracts -Ethereum contracts and unit tests. See [contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) +Ethereum contracts and unit tests. See [Snowfork/snowbridge/contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) ### Relayer Off-chain relayer services for relaying messages between Polkadot and Ethereum. See -[relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) +[Snowfork/snowbridge/relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) ### Local Testnet Scripts to provision a local testnet, running the above services to bridge between local deployments of Polkadot and -Ethereum. See [web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). +Ethereum. See [Snowfork/snowbridge/web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). ### Smoke Tests -Integration tests for our local testnet. See [smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). +Integration tests for our local testnet. See [Snowfork/snowbridge/smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). ## Development @@ -83,7 +91,7 @@ direnv allow ### Upgrading the Rust toolchain -Sometimes we would like to upgrade rust toolchain. First update `parachain/rust-toolchain.toml` as required and then +Sometimes we would like to upgrade rust toolchain. 
First update `rust-toolchain.toml` as required and then update `flake.lock` running ```sh nix flake lock --update-input rust-overlay diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 2f76d5b83579..a30cb88c908e 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-ethereum-client" description = "Snowbridge Ethereum Client Pallet" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/.gitignore b/bridges/snowbridge/pallets/ethereum-client/fuzz/.gitignore new file mode 100644 index 000000000000..2f940cc8b2ea --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/.gitignore @@ -0,0 +1,5 @@ +target +corpus +artifacts +coverage +.idea diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/fuzz/Cargo.toml new file mode 100644 index 000000000000..2a4e91590c24 --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "snowbridge-ethereum-client-fuzz" +version = "0.0.0" +publish = false +edition = "2021" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] } +arbitrary = { version = "1", optional = true, features = ["derive"] } +snowbridge-pallet-ethereum-client = { path = "..", features = ["fuzzing"] } +snowbridge-beacon-primitives = { path = "../../../primitives/beacon" } +hex-literal = "0.4.1" +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "fuzz_force_checkpoint" +path = "fuzz_targets/fuzz_force_checkpoint.rs" +test = false +doc = false + +[[bin]] +name = "fuzz_submit" +path = "fuzz_targets/fuzz_submit.rs" +test = false +doc = false + +[[bin]] +name = "fuzz_submit_execution_header" +path = "fuzz_targets/fuzz_submit_execution_header.rs" +test = false +doc = false diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/README.md b/bridges/snowbridge/pallets/ethereum-client/fuzz/README.md new file mode 100644 index 000000000000..eeecf199ca04 --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/README.md @@ -0,0 +1,21 @@ +# Beacon client fuzz tests + +This crate contains fuzz tests for the three beacon client extrinsics. + +# Installation + +``` +cargo install cargo-fuzz +``` + +# Run tests + +- Force Checkpoint: `cargo fuzz run fuzz_force_checkpoint -- -max_len=10000000000` +- Submit: `cargo fuzz run fuzz_submit -- -max_len=10000000000` +- Submit Execution Header: `cargo fuzz run fuzz_submit_execution_header -- -max_len=10000000000` + +Note: `max-len` is necessary because the max input length is 4096 bytes. Some of our inputs are larger than this +default value. When running the tests without an increased max len parameter, no fuzz data will be generated. + +The tests will keep running until a crash is found, so in our CI setup the number of runs is limited so that the +test completes. 
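For context on the note above: libFuzzer caps generated inputs at 4096 bytes by default, and some of these targets need larger inputs, so without a raised `-max_len` no usable fuzz data is produced. The `beacon-fuzz` job in the bridge workflow earlier in this diff passes the same flag and additionally bounds `-runs` via `FUZZ_MAX_LEN`/`FUZZ_MAX_RUNS` so the job terminates; note that the job is currently disabled with `if: false` and still `cd`s into the pre-fork `pallets/ethereum-beacon-client` path, while the fuzz crate added here lives under `pallets/ethereum-client`. A bounded local run mirroring the CI invocation, assuming a recent nightly toolchain:

```sh
# Raise the input cap past libFuzzer's 4096-byte default and bound the number
# of runs so the command terminates instead of fuzzing until a crash is found.
cd bridges/snowbridge/pallets/ethereum-client
cargo +nightly fuzz run fuzz_force_checkpoint -- -max_len=10000000000 -runs=30000
```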
diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_force_checkpoint.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_force_checkpoint.rs
new file mode 100644
index 000000000000..2e3d7543f469
--- /dev/null
+++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_force_checkpoint.rs
@@ -0,0 +1,16 @@
+#![no_main]
+extern crate snowbridge_pallet_ethereum_client;
+
+use libfuzzer_sys::fuzz_target;
+use snowbridge_pallet_ethereum_client::{mock::*, types::CheckpointUpdate};
+use snowbridge_ethereum_client_fuzz::types::FuzzCheckpointUpdate;
+use std::convert::TryInto;
+
+fuzz_target!(|input: FuzzCheckpointUpdate| {
+	new_tester().execute_with(|| {
+		let update: CheckpointUpdate = input.try_into().unwrap();
+		let result =
+			EthereumBeaconClient::force_checkpoint(RuntimeOrigin::root(), Box::new(update));
+		assert!(result.is_err());
+	});
+});
diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit.rs
new file mode 100644
index 000000000000..79c69e2cb23f
--- /dev/null
+++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit.rs
@@ -0,0 +1,16 @@
+#![no_main]
+extern crate snowbridge_pallet_ethereum_client;
+
+use snowbridge_pallet_ethereum_client::{mock::*, types::Update};
+use snowbridge_ethereum_client_fuzz::types::FuzzUpdate;
+use std::convert::TryInto;
+
+use libfuzzer_sys::fuzz_target;
+
+fuzz_target!(|input: FuzzUpdate| {
+	new_tester().execute_with(|| {
+		let update: Update = input.try_into().unwrap();
+		let result = EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update));
+		assert!(result.is_err());
+	});
+});
diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit_execution_header.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit_execution_header.rs
new file mode 100644
index 000000000000..82f4ea97079b
--- /dev/null
+++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit_execution_header.rs
@@ -0,0 +1,19 @@
+#![no_main]
+extern crate snowbridge_pallet_ethereum_client;
+
+use snowbridge_beacon_primitives::ExecutionHeaderUpdate;
+use snowbridge_pallet_ethereum_client::mock::*;
+use snowbridge_ethereum_client_fuzz::types::FuzzExecutionHeaderUpdate;
+
+use libfuzzer_sys::fuzz_target;
+
+fuzz_target!(|input: FuzzExecutionHeaderUpdate| {
+	new_tester().execute_with(|| {
+		let update: ExecutionHeaderUpdate = input.try_into().unwrap();
+		let result = EthereumBeaconClient::submit_execution_header(
+			RuntimeOrigin::signed(1),
+			Box::new(update),
+		);
+		assert!(result.is_err());
+	});
+});
diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/src/impls.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/impls.rs
new file mode 100644
index 000000000000..b216429fd3c4
--- /dev/null
+++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/impls.rs
@@ -0,0 +1,220 @@
+use crate::types::{
+	FuzzAncestryProof, FuzzBeaconHeader, FuzzCheckpointUpdate, FuzzExecutionHeaderUpdate,
+	FuzzExecutionPayloadHeader, FuzzNextSyncCommitteeUpdate, FuzzSyncAggregate, FuzzSyncCommittee,
+	FuzzUpdate,
+};
+use arbitrary::{Arbitrary, Result, Unstructured};
+use libfuzzer_sys::arbitrary;
+use snowbridge_beacon_primitives::{
+	types::BeaconHeader, updates::AncestryProof, ExecutionHeaderUpdate, ExecutionPayloadHeader,
+	PublicKey,
+};
+use snowbridge_pallet_ethereum_client::types::{
+	CheckpointUpdate, NextSyncCommitteeUpdate, SyncAggregate, SyncCommittee, Update,
+};
+use sp_core::H256;
+use std::convert::TryInto;
+
+impl TryFrom<FuzzUpdate> for Update {
+	type Error = String;
+
+	fn try_from(other: FuzzUpdate) -> Result<Self, Self::Error> {
+		let next: Option<NextSyncCommitteeUpdate> = other
+			.next_sync_committee_update
+			.map(|fuzz_update| fuzz_update.try_into().unwrap());
+
+		Ok(Self {
+			attested_header: other.attested_header.clone().try_into().unwrap(),
+			sync_aggregate: other.sync_aggregate.try_into().unwrap(),
+			signature_slot: other.signature_slot,
+			next_sync_committee_update: next,
+			finalized_header: other.finalized_header.clone().try_into().unwrap(),
+			finality_branch: other
+				.finality_branch
+				.iter()
+				.map(|&hash| H256::from(hash))
+				.collect::<Vec<H256>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+			block_roots_root: other.block_roots_root.into(),
+			block_roots_branch: other
+				.block_roots_branch
+				.iter()
+				.map(|&hash| H256::from(hash))
+				.collect::<Vec<H256>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+		})
+	}
+}
+
+impl TryFrom<FuzzNextSyncCommitteeUpdate> for NextSyncCommitteeUpdate {
+	type Error = String;
+
+	fn try_from(other: FuzzNextSyncCommitteeUpdate) -> Result<Self, Self::Error> {
+		Ok(Self {
+			next_sync_committee: other.next_sync_committee.try_into().unwrap(),
+			next_sync_committee_branch: other
+				.next_sync_committee_branch
+				.iter()
+				.map(|&hash| H256::from(hash))
+				.collect::<Vec<H256>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+		})
+	}
+}
+
+impl TryFrom<FuzzSyncAggregate> for SyncAggregate {
+	type Error = String;
+
+	fn try_from(other: FuzzSyncAggregate) -> Result<Self, Self::Error> {
+		Ok(Self {
+			sync_committee_bits: other.sync_committee_bits.into(),
+			sync_committee_signature: other.sync_committee_signature.into(),
+		})
+	}
+}
+
+impl TryFrom<FuzzCheckpointUpdate> for CheckpointUpdate {
+	type Error = String;
+
+	fn try_from(other: FuzzCheckpointUpdate) -> Result<Self, Self::Error> {
+		Ok(Self {
+			header: other.header.clone().try_into().unwrap(),
+			current_sync_committee: other.current_sync_committee.try_into().unwrap(),
+			current_sync_committee_branch: other
+				.current_sync_committee_branch
+				.iter()
+				.map(|&hash| H256::from(hash))
+				.collect::<Vec<H256>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+			validators_root: other.validators_root.into(),
+			block_roots_root: other.block_roots_root.into(),
+			block_roots_branch: other
+				.block_roots_branch
+				.iter()
+				.map(|&hash| H256::from(hash))
+				.collect::<Vec<H256>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+		})
+	}
+}
+
+impl TryFrom<FuzzSyncCommittee> for SyncCommittee {
+	type Error = String;
+
+	fn try_from(other: FuzzSyncCommittee) -> Result<Self, Self::Error> {
+		Ok(Self {
+			pubkeys: other
+				.pubkeys
+				.iter()
+				.map(|&pk| { pk.into() })
+				.collect::<Vec<PublicKey>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+			aggregate_pubkey: other.aggregate_pubkey.into(),
+		})
+	}
+}
+
+impl TryFrom<FuzzAncestryProof> for AncestryProof {
+	type Error = String;
+
+	fn try_from(other: FuzzAncestryProof) -> Result<Self, Self::Error> {
+		Ok(Self {
+			header_branch: other
+				.header_branch
+				.iter()
+				.map(|&hash| H256::from(hash))
+				.collect::<Vec<H256>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+			finalized_block_root: other.finalized_block_root.into(),
+		})
+	}
+}
+
+impl TryFrom<FuzzExecutionPayloadHeader> for ExecutionPayloadHeader {
+	type Error = String;
+
+	fn try_from(other: FuzzExecutionPayloadHeader) -> Result<Self, Self::Error> {
+		Ok(Self {
+			parent_hash: other.parent_hash.into(),
+			fee_recipient: other.fee_recipient.into(),
+			state_root: other.state_root.into(),
+			receipts_root: other.receipts_root.into(),
+			logs_bloom: other.logs_bloom.into(),
+			prev_randao: other.prev_randao.into(),
+			block_number: other.block_number,
+			gas_limit: other.gas_limit,
+			gas_used: other.gas_used,
+			timestamp: other.timestamp,
+			extra_data: other.extra_data.into(),
+			base_fee_per_gas: other.base_fee_per_gas.into(),
+			block_hash: other.block_hash.into(),
+			transactions_root: other.transactions_root.into(),
+			withdrawals_root: other.withdrawals_root.into(),
+		})
+	}
+}
+
+impl TryFrom<FuzzBeaconHeader> for BeaconHeader {
+	type Error = String;
+
+	fn try_from(other: FuzzBeaconHeader) -> Result<Self, Self::Error> {
+		Ok(Self {
+			slot: other.slot,
+			proposer_index: other.proposer_index,
+			parent_root: other.parent_root.into(),
+			state_root: other.state_root.into(),
+			body_root: other.body_root.into(),
+		})
+	}
+}
+
+impl TryFrom<FuzzExecutionHeaderUpdate> for ExecutionHeaderUpdate {
+	type Error = String;
+
+	fn try_from(other: FuzzExecutionHeaderUpdate) -> Result<Self, Self::Error> {
+		let ancestry_proof: Option<AncestryProof> =
+			other.ancestry_proof.map(|fuzz_update| fuzz_update.try_into().unwrap());
+
+		Ok(Self {
+			header: other.header.try_into().unwrap(),
+			ancestry_proof,
+			execution_header: other.execution_header.try_into().unwrap(),
+			execution_branch: other
+				.execution_branch
+				.iter()
+				.map(|&hash| H256::from(hash))
+				.collect::<Vec<H256>>()
+				.as_slice()
+				.try_into()
+				.unwrap(),
+		})
+	}
+}
+
+// The arbitrary derive implementation is super slow for the pubkey set, so create a custom
+// impl for the sync committee.
+impl Arbitrary<'_> for FuzzSyncCommittee {
+	fn arbitrary(u: &mut Unstructured<'_>) -> Result<Self> {
+		let mut pubkeys = [[0u8; 48]; 32];
+
+		for i in 0..32 {
+			pubkeys[i] = <[u8; 48]>::arbitrary(u)?;
+		}
+
+		Ok(FuzzSyncCommittee { pubkeys, aggregate_pubkey: <[u8; 48]>::arbitrary(u)? })
+	}
+}
diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/lib.rs
new file mode 100644
index 000000000000..fb880c01820a
--- /dev/null
+++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/lib.rs
@@ -0,0 +1,2 @@
+pub mod impls;
+pub mod types;
diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/types.rs
new file mode 100644
index 000000000000..37e5eb0d6c4b
--- /dev/null
+++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/types.rs
@@ -0,0 +1,83 @@
+use libfuzzer_sys::arbitrary;
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzCheckpointUpdate {
+	pub header: FuzzBeaconHeader,
+	pub current_sync_committee: FuzzSyncCommittee,
+	pub current_sync_committee_branch: Vec<[u8; 32]>,
+	pub validators_root: [u8; 32],
+	pub block_roots_root: [u8; 32],
+	pub block_roots_branch: Vec<[u8; 32]>,
+}
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzBeaconHeader {
+	pub slot: u64,
+	pub proposer_index: u64,
+	pub parent_root: [u8; 32],
+	pub state_root: [u8; 32],
+	pub body_root: [u8; 32],
+}
+
+#[derive(Debug, Clone)]
+pub struct FuzzSyncCommittee {
+	pub pubkeys: [[u8; 48]; 32],
+	pub aggregate_pubkey: [u8; 48],
+}
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzUpdate {
+	pub attested_header: FuzzBeaconHeader,
+	pub sync_aggregate: FuzzSyncAggregate,
+	pub signature_slot: u64,
+	pub next_sync_committee_update: Option<FuzzNextSyncCommitteeUpdate>,
+	pub finalized_header: FuzzBeaconHeader,
+	pub finality_branch: Vec<[u8; 32]>,
+	pub block_roots_root: [u8; 32],
+	pub block_roots_branch: Vec<[u8; 32]>,
+}
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzSyncAggregate {
+	pub sync_committee_bits: [u8; 4],
+	pub sync_committee_signature: [u8; 96],
+}
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzNextSyncCommitteeUpdate {
+	pub next_sync_committee: FuzzSyncCommittee,
+	pub next_sync_committee_branch: Vec<[u8; 32]>,
+}
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzExecutionHeaderUpdate {
+	pub header: FuzzBeaconHeader,
+	pub ancestry_proof: Option<FuzzAncestryProof>,
+	pub execution_header: FuzzExecutionPayloadHeader,
+	pub execution_branch: Vec<[u8; 32]>,
+}
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzAncestryProof {
+	pub header_branch: Vec<[u8; 32]>,
+	pub finalized_block_root: [u8; 32],
+}
+
+#[derive(arbitrary::Arbitrary, Debug, Clone)]
+pub struct FuzzExecutionPayloadHeader {
+	pub parent_hash: [u8; 32],
+	pub fee_recipient: [u8; 20],
+	pub state_root: [u8; 32],
+	pub receipts_root: [u8; 32],
+	pub logs_bloom: Vec<u8>,
+	pub prev_randao: [u8; 32],
+	pub block_number: u64,
+	pub gas_limit: u64,
+	pub gas_used: u64,
+	pub timestamp: u64,
+	pub extra_data: Vec<u8>,
+	pub base_fee_per_gas: [u8; 32],
+	pub block_hash: [u8; 32],
+	pub transactions_root: [u8; 32],
+	pub withdrawals_root: [u8; 32],
+}
diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
index 1081b162ddae..dc853dc0a51d 100644
--- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
+++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "snowbridge-pallet-inbound-queue"
 description = "Snowbridge Inbound Queue Pallet"
-version = "0.0.0"
+version = "0.9.0"
 authors = ["Snowfork "]
 edition.workspace = true
 repository.workspace = true
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
index 110f611c6766..d75c3c6a1863 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
@@ -88,6 +88,7 @@ impl pallet_balances::Config for Test {
 	type MaxFreezes = ();
 	type RuntimeHoldReason = ();
 	type RuntimeFreezeReason = ();
+	type MaxHolds = ();
 }
 
 parameter_types!
{ diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index ae52fb3e5c49..03ddb12b7b4a 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-outbound-queue" description = "Snowbridge Outbound Queue Pallet" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index c185d5af7062..503dc14bbc9d 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-merkle-tree" description = "Snowbridge Outbound Queue Merkle Tree" -version = "0.1.1" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index 347b3bae493b..0f0bfc4e3580 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-runtime-api" description = "Snowbridge Outbound Queue Runtime API" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index aa600511633b..f365b52e84db 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-system" description = "Snowbridge System Pallet" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 355d2d29147f..6d3735f776bf 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-system-runtime-api" description = "Snowbridge System Runtime API" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index b7f38fb753d3..13abee14b4af 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -79,6 +79,8 @@ use xcm_executor::traits::ConvertLocation; #[cfg(feature = "runtime-benchmarks")] use frame_support::traits::OriginTrait; +pub use pallet::*; + pub type BalanceOf = <::Token as Inspect<::AccountId>>::Balance; pub type AccountIdOf = ::AccountId; diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index edc3f141b073..bc2295781327 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -136,6 +136,7 @@ impl pallet_balances::Config for Test { type MaxFreezes = (); type RuntimeHoldReason = (); type RuntimeFreezeReason = (); + type MaxHolds 
= (); } impl pallet_xcm_origin::Config for Test { diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index b59e5191b418..bb30eae33b1e 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-beacon-primitives" description = "Snowbridge Beacon Primitives" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork <contact@snowfork.com>"] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/beacon/src/types.rs b/bridges/snowbridge/primitives/beacon/src/types.rs index 2af522f56b0d..6f0886ba8b5c 100644 --- a/bridges/snowbridge/primitives/beacon/src/types.rs +++ b/bridges/snowbridge/primitives/beacon/src/types.rs @@ -586,7 +586,7 @@ pub mod deneb { use sp_std::prelude::*; /// ExecutionPayloadHeader - /// + /// https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#executionpayloadheader #[derive( Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, )] diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index f735d01a848f..8df3ac21f4b0 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-core" description = "Snowbridge Core" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork <contact@snowfork.com>"] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index 047ebdd56695..710a056daa5f 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-ethereum" description = "Snowbridge Ethereum" -version = "0.1.0" +version = "0.9.0" authors = ["Snowfork <contact@snowfork.com>"] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index 712c60c2148f..6c0d6aefed99 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-router-primitives" description = "Snowbridge Router Primitives" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork <contact@snowfork.com>"] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index f5b44b25585a..12f9cfa42d91 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-common" description = "Snowbridge Runtime Common" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork <contact@snowfork.com>"] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index a2994e618913..7c02ac40b63b 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-test-common" description = "Snowbridge Runtime Tests" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork <contact@snowfork.com>"] edition = "2021" license = "Apache-2.0" diff --git a/bridges/snowbridge/rustfmt.toml
b/bridges/snowbridge/rustfmt.toml new file mode 100644 index 000000000000..c34215391239 --- /dev/null +++ b/bridges/snowbridge/rustfmt.toml @@ -0,0 +1,24 @@ +# Basic +edition = "2021" +hard_tabs = true +max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true +# Format comments +comment_width = 100 +wrap_comments = true diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh new file mode 100755 index 000000000000..8aa2d2a7035e --- /dev/null +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# A script to clean up the Snowfork fork of the polkadot-sdk and contribute it back upstream to parity/polkadot-sdk +# ./bridges/snowbridge/scripts/contribute-upstream.sh <branch_name> + +# show CLI help +function show_help() { + set +x + echo " " + echo Error: $1 + echo "Usage:" + echo " ./bridges/snowbridge/scripts/contribute-upstream.sh <branch_name> Exit with code 0 if pallets code is well decoupled from the other code in the repo" + exit 1 +} + +if [[ -z "$1" ]]; then + echo "Please provide a branch name you would like your upstream branch to be named" + exit 1 +fi + +branch_name=$1 + +set -eux + +# let's avoid any restrictions on where this script can be called from - the snowbridge repo may be +# plugged into any other repo folder. So the script (and other stuff that needs to be removed) +# may be located either in the call dir, or in one of its subdirs. +SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../" + +# Get the current Git branch name +current_branch=$(git rev-parse --abbrev-ref HEAD) + +if [ "$current_branch" = "$branch_name" ] || git branch | grep -q "$branch_name"; then + echo "Already on requested branch or branch exists, not creating." +else + git branch "$branch_name" +fi + +git checkout "$branch_name" + +# remove everything we think is not required for our needs +rm -rf rust-toolchain.toml +rm -rf $SNOWBRIDGE_FOLDER/.cargo +rm -rf $SNOWBRIDGE_FOLDER/.github +rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md +rm -rf $SNOWBRIDGE_FOLDER/.gitignore +rm -rf $SNOWBRIDGE_FOLDER/templates +rm -rf $SNOWBRIDGE_FOLDER/pallets/ethereum-client/fuzz + +pushd $SNOWBRIDGE_FOLDER + +# let's test if everything we need compiles +cargo check -p snowbridge-pallet-ethereum-client +cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks +cargo check -p snowbridge-pallet-ethereum-client --features try-runtime +cargo check -p snowbridge-pallet-inbound-queue +cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks +cargo check -p snowbridge-pallet-inbound-queue --features try-runtime +cargo check -p snowbridge-pallet-outbound-queue +cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks +cargo check -p snowbridge-pallet-outbound-queue --features try-runtime +cargo check -p snowbridge-pallet-system +cargo check -p snowbridge-pallet-system --features runtime-benchmarks +cargo check -p snowbridge-pallet-system --features try-runtime + +# We remove the workspace manifest and lock file only after all checks are done.
Otherwise we may use different +# Substrate/Polkadot/Cumulus commits and our checks will fail +rm -f $SNOWBRIDGE_FOLDER/Cargo.toml +rm -f $SNOWBRIDGE_FOLDER/Cargo.lock + +popd + +# Restore Parity's CI files, which we had overwritten in our fork to run our own CI +rm -rf .github +git remote -v | grep -w parity || git remote add parity https://github.com/paritytech/polkadot-sdk +git fetch parity master +git checkout parity/master -- .github +git add -- .github + +echo "OK" diff --git a/bridges/snowbridge/templates/benchmarking-fixtures.mustache b/bridges/snowbridge/templates/benchmarking-fixtures.mustache new file mode 100644 index 000000000000..abeab79a5326 --- /dev/null +++ b/bridges/snowbridge/templates/benchmarking-fixtures.mustache @@ -0,0 +1,179 @@ +// Generated, do not edit! +// See README.md for instructions to generate +use crate::{ + CheckpointUpdate, ExecutionHeaderUpdate, Update, +}; +use hex_literal::hex; +use primitives::{ + types::deneb, updates::AncestryProof, BeaconHeader, NextSyncCommitteeUpdate, SyncAggregate, + SyncCommittee, VersionedExecutionPayloadHeader, +}; +use sp_core::U256; +use sp_std::{boxed::Box, vec}; + +pub fn make_checkpoint() -> Box<CheckpointUpdate> { + Box::new(CheckpointUpdate { + header: BeaconHeader { + slot: {{CheckpointUpdate.Header.Slot}}, + proposer_index: {{CheckpointUpdate.Header.ProposerIndex}}, + parent_root: hex!("{{CheckpointUpdate.Header.ParentRoot}}").into(), + state_root: hex!("{{CheckpointUpdate.Header.StateRoot}}").into(), + body_root: hex!("{{CheckpointUpdate.Header.BodyRoot}}").into(), + }, + current_sync_committee: SyncCommittee { + pubkeys: [ + {{#CheckpointUpdate.CurrentSyncCommittee.Pubkeys}} + hex!("{{.}}").into(), + {{/CheckpointUpdate.CurrentSyncCommittee.Pubkeys}} + ], + aggregate_pubkey: hex!("{{CheckpointUpdate.CurrentSyncCommittee.AggregatePubkey}}").into(), + }, + current_sync_committee_branch: vec![ + {{#CheckpointUpdate.CurrentSyncCommitteeBranch}} + hex!("{{.}}").into(), + {{/CheckpointUpdate.CurrentSyncCommitteeBranch}} + ], + validators_root: hex!("{{CheckpointUpdate.ValidatorsRoot}}").into(), + block_roots_root: hex!("{{CheckpointUpdate.BlockRootsRoot}}").into(), + block_roots_branch: vec![ + {{#CheckpointUpdate.BlockRootsBranch}} + hex!("{{.}}").into(), + {{/CheckpointUpdate.BlockRootsBranch}} + ], + }) +} + +pub fn make_sync_committee_update() -> Box<Update> { + Box::new(Update { + attested_header: BeaconHeader { + slot: {{SyncCommitteeUpdate.AttestedHeader.Slot}}, + proposer_index: {{SyncCommitteeUpdate.AttestedHeader.ProposerIndex}}, + parent_root: hex!("{{SyncCommitteeUpdate.AttestedHeader.ParentRoot}}").into(), + state_root: hex!("{{SyncCommitteeUpdate.AttestedHeader.StateRoot}}").into(), + body_root: hex!("{{SyncCommitteeUpdate.AttestedHeader.BodyRoot}}").into(), + }, + sync_aggregate: SyncAggregate{ + sync_committee_bits: hex!("{{SyncCommitteeUpdate.SyncAggregate.SyncCommitteeBits}}"), + sync_committee_signature: hex!("{{SyncCommitteeUpdate.SyncAggregate.SyncCommitteeSignature}}").into(), + }, + signature_slot: {{SyncCommitteeUpdate.SignatureSlot}}, + next_sync_committee_update: Some(NextSyncCommitteeUpdate { + next_sync_committee: SyncCommittee { + pubkeys: [ + {{#SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommittee.Pubkeys}} + hex!("{{.}}").into(), + {{/SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommittee.Pubkeys}} + ], + aggregate_pubkey: hex!("{{SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommittee.AggregatePubkey}}").into(), + }, + next_sync_committee_branch: vec![ +
{{#SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommitteeBranch}} + hex!("{{.}}").into(), + {{/SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommitteeBranch}} + ], + }), + finalized_header: BeaconHeader{ + slot: {{SyncCommitteeUpdate.FinalizedHeader.Slot}}, + proposer_index: {{SyncCommitteeUpdate.FinalizedHeader.ProposerIndex}}, + parent_root: hex!("{{SyncCommitteeUpdate.FinalizedHeader.ParentRoot}}").into(), + state_root: hex!("{{SyncCommitteeUpdate.FinalizedHeader.StateRoot}}").into(), + body_root: hex!("{{SyncCommitteeUpdate.FinalizedHeader.BodyRoot}}").into(), + }, + finality_branch: vec![ + {{#SyncCommitteeUpdate.FinalityBranch}} + hex!("{{.}}").into(), + {{/SyncCommitteeUpdate.FinalityBranch}} + ], + block_roots_root: hex!("{{SyncCommitteeUpdate.BlockRootsRoot}}").into(), + block_roots_branch: vec![ + {{#SyncCommitteeUpdate.BlockRootsBranch}} + hex!("{{.}}").into(), + {{/SyncCommitteeUpdate.BlockRootsBranch}} + ], + }) +} + +pub fn make_finalized_header_update() -> Box<Update> { + Box::new(Update { + attested_header: BeaconHeader { + slot: {{FinalizedHeaderUpdate.AttestedHeader.Slot}}, + proposer_index: {{FinalizedHeaderUpdate.AttestedHeader.ProposerIndex}}, + parent_root: hex!("{{FinalizedHeaderUpdate.AttestedHeader.ParentRoot}}").into(), + state_root: hex!("{{FinalizedHeaderUpdate.AttestedHeader.StateRoot}}").into(), + body_root: hex!("{{FinalizedHeaderUpdate.AttestedHeader.BodyRoot}}").into(), + }, + sync_aggregate: SyncAggregate{ + sync_committee_bits: hex!("{{FinalizedHeaderUpdate.SyncAggregate.SyncCommitteeBits}}"), + sync_committee_signature: hex!("{{FinalizedHeaderUpdate.SyncAggregate.SyncCommitteeSignature}}").into(), + }, + signature_slot: {{FinalizedHeaderUpdate.SignatureSlot}}, + next_sync_committee_update: None, + finalized_header: BeaconHeader { + slot: {{FinalizedHeaderUpdate.FinalizedHeader.Slot}}, + proposer_index: {{FinalizedHeaderUpdate.FinalizedHeader.ProposerIndex}}, + parent_root: hex!("{{FinalizedHeaderUpdate.FinalizedHeader.ParentRoot}}").into(), + state_root: hex!("{{FinalizedHeaderUpdate.FinalizedHeader.StateRoot}}").into(), + body_root: hex!("{{FinalizedHeaderUpdate.FinalizedHeader.BodyRoot}}").into(), + }, + finality_branch: vec![ + {{#FinalizedHeaderUpdate.FinalityBranch}} + hex!("{{.}}").into(), + {{/FinalizedHeaderUpdate.FinalityBranch}} + ], + block_roots_root: hex!("{{FinalizedHeaderUpdate.BlockRootsRoot}}").into(), + block_roots_branch: vec![ + {{#FinalizedHeaderUpdate.BlockRootsBranch}} + hex!("{{.}}").into(), + {{/FinalizedHeaderUpdate.BlockRootsBranch}} + ] + }) +} + +pub fn make_execution_header_update() -> Box<ExecutionHeaderUpdate> { + Box::new(ExecutionHeaderUpdate { + header: BeaconHeader { + slot: {{HeaderUpdate.Header.Slot}}, + proposer_index: {{HeaderUpdate.Header.ProposerIndex}}, + parent_root: hex!("{{HeaderUpdate.Header.ParentRoot}}").into(), + state_root: hex!("{{HeaderUpdate.Header.StateRoot}}").into(), + body_root: hex!("{{HeaderUpdate.Header.BodyRoot}}").into(), + }, + {{#HeaderUpdate.AncestryProof}} + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + {{#HeaderUpdate.AncestryProof.HeaderBranch}} + hex!("{{.}}").into(), + {{/HeaderUpdate.AncestryProof.HeaderBranch}} + ], + finalized_block_root: hex!("{{HeaderUpdate.AncestryProof.FinalizedBlockRoot}}").into(), + }), + {{/HeaderUpdate.AncestryProof}} + {{^HeaderUpdate.AncestryProof}} + ancestry_proof: None, + {{/HeaderUpdate.AncestryProof}} + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash:
hex!("{{HeaderUpdate.ExecutionHeader.Deneb.ParentHash}}").into(), + fee_recipient: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.FeeRecipient}}").into(), + state_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.StateRoot}}").into(), + receipts_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.ReceiptsRoot}}").into(), + logs_bloom: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.LogsBloom}}").into(), + prev_randao: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.PrevRandao}}").into(), + block_number: {{HeaderUpdate.ExecutionHeader.Deneb.BlockNumber}}, + gas_limit: {{HeaderUpdate.ExecutionHeader.Deneb.GasLimit}}, + gas_used: {{HeaderUpdate.ExecutionHeader.Deneb.GasUsed}}, + timestamp: {{HeaderUpdate.ExecutionHeader.Deneb.Timestamp}}, + extra_data: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.ExtraData}}").into(), + base_fee_per_gas: U256::from({{HeaderUpdate.ExecutionHeader.Deneb.BaseFeePerGas}}u64), + block_hash: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.BlockHash}}").into(), + transactions_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.TransactionRoot}}").into(), + withdrawals_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.WithdrawalsRoot}}").into(), + blob_gas_used: {{HeaderUpdate.ExecutionHeader.Deneb.BlobGasUsed}}, + excess_blob_gas: {{HeaderUpdate.ExecutionHeader.Deneb.ExcessBlobGas}}, + }), + execution_branch: vec![ + {{#HeaderUpdate.ExecutionBranch}} + hex!("{{.}}").into(), + {{/HeaderUpdate.ExecutionBranch}} + ], + }) +} diff --git a/bridges/snowbridge/templates/module-weight-template.hbs b/bridges/snowbridge/templates/module-weight-template.hbs new file mode 100644 index 000000000000..5919a7cc7c14 --- /dev/null +++ b/bridges/snowbridge/templates/module-weight-template.hbs @@ -0,0 +1,74 @@ +{{header}} +//! Autogenerated weights for `{{pallet}}` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} +//! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` +//! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}` +//! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` +//! WASM-EXECUTION: `{{cmd.wasm_execution}}`, CHAIN: `{{cmd.chain}}`, DB CACHE: `{{cmd.db_cache}}` + +// Executed Command: +{{#each args as |arg|}} +// {{arg}} +{{/each}} + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `{{pallet}}`. +pub trait WeightInfo { + {{#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{c.name}}: u32, {{/each~}} + ) -> Weight; + {{/each}} +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + {{#each benchmarks as |benchmark|}} + {{#each benchmark.comments as |comment|}} + /// {{comment}} + {{/each}} + {{#each benchmark.component_ranges as |range|}} + /// The range of component `{{range.name}}` is `[{{range.min}}, {{range.max}}]`. 
+ {{/each}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + // Proof Size summary in bytes: + // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` + // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` + // Minimum execution time: {{underscore benchmark.min_execution_time}}_000 picoseconds. + Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) + {{#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} + .saturating_add(Weight::from_parts({{underscore cw.slope}}, 0).saturating_mul({{cw.name}}.into())) + {{/each}} + {{#if (ne benchmark.base_reads "0")}} + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}}_u64)) + {{/if}} + {{#each benchmark.component_reads as |cr|}} + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}}_u64).saturating_mul({{cr.name}}.into()))) + {{/each}} + {{#if (ne benchmark.base_writes "0")}} + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}}_u64)) + {{/if}} + {{#each benchmark.component_writes as |cw|}} + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}}_u64).saturating_mul({{cw.name}}.into()))) + {{/each}} + {{#each benchmark.component_calculated_proof_size as |cp|}} + .saturating_add(Weight::from_parts(0, {{cp.slope}}).saturating_mul({{cp.name}}.into())) + {{/each}} + } + {{/each}} +} diff --git a/bridges/snowbridge/templates/register_token.mustache b/bridges/snowbridge/templates/register_token.mustache new file mode 100644 index 000000000000..e8d35be42ec1 --- /dev/null +++ b/bridges/snowbridge/templates/register_token.mustache @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +// Generated, do not edit! 
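+// The generated fixture pairs a CompactExecutionHeader with the RegisterToken event log and its Merkle proof, so the message can be verified and replayed in inbound-queue tests.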
+// See ethereum client README.md for instructions to generate + +use crate::InboundQueueFixture; +use hex_literal::hex; +use snowbridge_beacon_primitives::CompactExecutionHeader; +use snowbridge_core::inbound::{Log, Message, Proof}; +use sp_std::vec; + +pub fn make_register_token_message() -> InboundQueueFixture { + InboundQueueFixture { + execution_header: CompactExecutionHeader{ + parent_hash: hex!("{{InboundMessageTest.ExecutionHeader.ParentHash}}").into(), + block_number: {{InboundMessageTest.ExecutionHeader.BlockNumber}}, + state_root: hex!("{{InboundMessageTest.ExecutionHeader.StateRoot}}").into(), + receipts_root: hex!("{{InboundMessageTest.ExecutionHeader.ReceiptsRoot}}").into(), + }, + message: Message { + event_log: Log { + address: hex!("{{InboundMessageTest.Message.EventLog.Address}}").into(), + topics: vec![ + {{#InboundMessageTest.Message.EventLog.Topics}} + hex!("{{.}}").into(), + {{/InboundMessageTest.Message.EventLog.Topics}} + ], + data: hex!("{{InboundMessageTest.Message.EventLog.Data}}").into(), + }, + proof: Proof { + block_hash: hex!("{{InboundMessageTest.Message.Proof.BlockHash}}").into(), + tx_index: {{InboundMessageTest.Message.Proof.TxIndex}}, + data: (vec![ + {{#InboundMessageTest.Message.Proof.Data.Keys}} + hex!("{{.}}").to_vec(), + {{/InboundMessageTest.Message.Proof.Data.Keys}} + ], vec![ + {{#InboundMessageTest.Message.Proof.Data.Values}} + hex!("{{.}}").to_vec(), + {{/InboundMessageTest.Message.Proof.Data.Values}} + ]), + }, + }, + } +} diff --git a/bridges/snowbridge/templates/send_token.mustache b/bridges/snowbridge/templates/send_token.mustache new file mode 100644 index 000000000000..e104d2da279d --- /dev/null +++ b/bridges/snowbridge/templates/send_token.mustache @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +// Generated, do not edit! 
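+// As in register_token.mustache, the Proof's data field carries two parallel vectors of proof-node keys and values, matching snowbridge_core::inbound::Proof.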
+// See ethereum client README.md for instructions to generate + +use crate::InboundQueueFixture; +use hex_literal::hex; +use snowbridge_beacon_primitives::CompactExecutionHeader; +use snowbridge_core::inbound::{Log, Message, Proof}; +use sp_std::vec; + +pub fn make_send_token_message() -> InboundQueueFixture { + InboundQueueFixture { + execution_header: CompactExecutionHeader{ + parent_hash: hex!("{{InboundMessageTest.ExecutionHeader.ParentHash}}").into(), + block_number: {{InboundMessageTest.ExecutionHeader.BlockNumber}}, + state_root: hex!("{{InboundMessageTest.ExecutionHeader.StateRoot}}").into(), + receipts_root: hex!("{{InboundMessageTest.ExecutionHeader.ReceiptsRoot}}").into(), + }, + message: Message { + event_log: Log { + address: hex!("{{InboundMessageTest.Message.EventLog.Address}}").into(), + topics: vec![ + {{#InboundMessageTest.Message.EventLog.Topics}} + hex!("{{.}}").into(), + {{/InboundMessageTest.Message.EventLog.Topics}} + ], + data: hex!("{{InboundMessageTest.Message.EventLog.Data}}").into(), + }, + proof: Proof { + block_hash: hex!("{{InboundMessageTest.Message.Proof.BlockHash}}").into(), + tx_index: {{InboundMessageTest.Message.Proof.TxIndex}}, + data: (vec![ + {{#InboundMessageTest.Message.Proof.Data.Keys}} + hex!("{{.}}").to_vec(), + {{/InboundMessageTest.Message.Proof.Data.Keys}} + ], vec![ + {{#InboundMessageTest.Message.Proof.Data.Values}} + hex!("{{.}}").to_vec(), + {{/InboundMessageTest.Message.Proof.Data.Values}} + ]), + }, + }, + } +} diff --git a/polkadot/node/subsystem-bench/src/approval/mod.rs b/polkadot/node/subsystem-bench/src/approval/mod.rs index 3544ce74711e..055aeb193456 100644 --- a/polkadot/node/subsystem-bench/src/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/approval/mod.rs @@ -29,7 +29,9 @@ use crate::{ }, core::{ configuration::{TestAuthorities, TestConfiguration}, - environment::{TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT}, + environment::{ + BenchmarkUsage, TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT, + }, mock::{ dummy_builder, network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, @@ -876,7 +878,11 @@ fn prepare_test_inner( ) } -pub async fn bench_approvals(env: &mut TestEnvironment, mut state: ApprovalTestState) { +pub async fn bench_approvals( + benchmark_name: &str, + env: &mut TestEnvironment, + mut state: ApprovalTestState, +) -> BenchmarkUsage { let producer_rx = state .start_message_production( env.network(), @@ -885,15 +891,16 @@ pub async fn bench_approvals(env: &mut TestEnvironment, mut state: ApprovalTestS env.registry().clone(), ) .await; - bench_approvals_run(env, state, producer_rx).await + bench_approvals_run(benchmark_name, env, state, producer_rx).await } /// Runs the approval benchmark. 
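/// Takes the readiness signal from the message producer (`producer_rx`) and, once the run completes, returns the collected `BenchmarkUsage` for the subsystems under test.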
pub async fn bench_approvals_run( + benchmark_name: &str, env: &mut TestEnvironment, state: ApprovalTestState, producer_rx: oneshot::Receiver<()>, -) { +) -> BenchmarkUsage { let config = env.config().clone(); env.metrics().set_n_validators(config.n_validators); @@ -1054,6 +1061,5 @@ pub async fn bench_approvals_run( state.total_unique_messages.load(std::sync::atomic::Ordering::SeqCst) ); - env.display_network_usage(); - env.display_cpu_usage(&["approval-distribution", "approval-voting"]); + env.collect_resource_usage(benchmark_name, &["approval-distribution", "approval-voting"]) } diff --git a/polkadot/node/subsystem-bench/src/availability/mod.rs b/polkadot/node/subsystem-bench/src/availability/mod.rs index f7f1184448b3..56ec6705b7e3 100644 --- a/polkadot/node/subsystem-bench/src/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/availability/mod.rs @@ -13,7 +13,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::{core::mock::ChainApiState, TestEnvironment}; +use crate::{ + core::{environment::BenchmarkUsage, mock::ChainApiState}, + TestEnvironment, +}; use av_store::NetworkAvailabilityState; use bitvec::bitvec; use colored::Colorize; @@ -430,7 +433,11 @@ impl TestState { } } -pub async fn benchmark_availability_read(env: &mut TestEnvironment, mut state: TestState) { +pub async fn benchmark_availability_read( + benchmark_name: &str, + env: &mut TestEnvironment, + mut state: TestState, +) -> BenchmarkUsage { let config = env.config().clone(); env.import_block(new_block_import_info(Hash::repeat_byte(1), 1)).await; @@ -490,12 +497,15 @@ pub async fn benchmark_availability_read(env: &mut TestEnvironment, mut state: T format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red() ); - env.display_network_usage(); - env.display_cpu_usage(&["availability-recovery"]); env.stop().await; + env.collect_resource_usage(benchmark_name, &["availability-recovery"]) } -pub async fn benchmark_availability_write(env: &mut TestEnvironment, mut state: TestState) { +pub async fn benchmark_availability_write( + benchmark_name: &str, + env: &mut TestEnvironment, + mut state: TestState, +) -> BenchmarkUsage { let config = env.config().clone(); env.metrics().set_n_validators(config.n_validators); @@ -648,15 +658,11 @@ pub async fn benchmark_availability_write(env: &mut TestEnvironment, mut state: format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red() ); - env.display_network_usage(); - - env.display_cpu_usage(&[ - "availability-distribution", - "bitfield-distribution", - "availability-store", - ]); - env.stop().await; + env.collect_resource_usage( + benchmark_name, + &["availability-distribution", "bitfield-distribution", "availability-store"], + ) } pub fn peer_bitfield_message_v2( diff --git a/polkadot/node/subsystem-bench/src/cli.rs b/polkadot/node/subsystem-bench/src/cli.rs index bfce8cc183a9..21f5e6a85629 100644 --- a/polkadot/node/subsystem-bench/src/cli.rs +++ b/polkadot/node/subsystem-bench/src/cli.rs @@ -40,6 +40,22 @@ pub enum TestObjective { Unimplemented, } +impl std::fmt::Display for TestObjective { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + Self::DataAvailabilityRead(_) => "DataAvailabilityRead", + Self::DataAvailabilityWrite => "DataAvailabilityWrite", + Self::TestSequence(_) => "TestSequence", + Self::ApprovalVoting(_) => "ApprovalVoting", + Self::Unimplemented => "Unimplemented", + } + ) 
+ } +} + #[derive(Debug, clap::Parser)] #[clap(rename_all = "kebab-case")] #[allow(missing_docs)] diff --git a/polkadot/node/subsystem-bench/src/core/environment.rs b/polkadot/node/subsystem-bench/src/core/environment.rs index 59bfed7f1120..cffda38f2110 100644 --- a/polkadot/node/subsystem-bench/src/core/environment.rs +++ b/polkadot/node/subsystem-bench/src/core/environment.rs @@ -22,6 +22,7 @@ use colored::Colorize; use core::time::Duration; use futures::{Future, FutureExt}; use polkadot_overseer::{BlockInfo, Handle as OverseerHandle}; +use serde::{Deserialize, Serialize}; use polkadot_node_subsystem::{messages::AllMessages, Overseer, SpawnGlue, TimeoutExt}; use polkadot_node_subsystem_types::Hash; @@ -347,57 +348,102 @@ impl TestEnvironment { } } - /// Display network usage stats. - pub fn display_network_usage(&self) { - let stats = self.network().peer_stats(0); - - let total_node_received = stats.received() / 1024; - let total_node_sent = stats.sent() / 1024; - - println!( - "\nPayload bytes received from peers: {}, {}", - format!("{:.2} KiB total", total_node_received).blue(), - format!("{:.2} KiB/block", total_node_received / self.config().num_blocks) - .bright_blue() - ); + pub fn collect_resource_usage( + &self, + benchmark_name: &str, + subsystems_under_test: &[&str], + ) -> BenchmarkUsage { + BenchmarkUsage { + benchmark_name: benchmark_name.to_string(), + network_usage: self.network_usage(), + cpu_usage: self.cpu_usage(subsystems_under_test), + } + } - println!( - "Payload bytes sent to peers: {}, {}", - format!("{:.2} KiB total", total_node_sent).blue(), - format!("{:.2} KiB/block", total_node_sent / self.config().num_blocks).bright_blue() - ); + fn network_usage(&self) -> Vec { + let stats = self.network().peer_stats(0); + let total_node_received = (stats.received() / 1024) as f64; + let total_node_sent = (stats.sent() / 1024) as f64; + let num_blocks = self.config().num_blocks as f64; + + vec![ + ResourceUsage { + resource_name: "Received from peers".to_string(), + total: total_node_received, + per_block: total_node_received / num_blocks, + }, + ResourceUsage { + resource_name: "Sent to peers".to_string(), + total: total_node_sent, + per_block: total_node_sent / num_blocks, + }, + ] } - /// Print CPU usage stats in the CLI. 
- pub fn display_cpu_usage(&self, subsystems_under_test: &[&str]) { + fn cpu_usage(&self, subsystems_under_test: &[&str]) -> Vec<ResourceUsage> { let test_metrics = super::display::parse_metrics(self.registry()); + let mut usage = vec![]; + let num_blocks = self.config().num_blocks as f64; for subsystem in subsystems_under_test.iter() { let subsystem_cpu_metrics = test_metrics.subset_with_label_value("task_group", subsystem); let total_cpu = subsystem_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum"); - println!( - "{} CPU usage {}", - subsystem.to_string().bright_green(), - format!("{:.3}s", total_cpu).bright_purple() - ); - println!( - "{} CPU usage per block {}", - subsystem.to_string().bright_green(), - format!("{:.3}s", total_cpu / self.config().num_blocks as f64).bright_purple() - ); + usage.push(ResourceUsage { + resource_name: subsystem.to_string(), + total: total_cpu, + per_block: total_cpu / num_blocks, + }); } let test_env_cpu_metrics = test_metrics.subset_with_label_value("task_group", "test-environment"); let total_cpu = test_env_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum"); - println!( - "Total test environment CPU usage {}", - format!("{:.3}s", total_cpu).bright_purple() - ); - println!( - "Test environment CPU usage per block {}", - format!("{:.3}s", total_cpu / self.config().num_blocks as f64).bright_purple() + + usage.push(ResourceUsage { + resource_name: "Test environment".to_string(), + total: total_cpu, + per_block: total_cpu / num_blocks, + }); + + usage + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct BenchmarkUsage { + benchmark_name: String, + network_usage: Vec<ResourceUsage>, + cpu_usage: Vec<ResourceUsage>, +} + +impl std::fmt::Display for BenchmarkUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "\n{}\n\n{}\n{}\n\n{}\n{}\n", + self.benchmark_name.purple(), + format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), + self.network_usage + .iter() + .map(|v| v.to_string()) + .collect::<Vec<String>>() + .join("\n"), + format!("{:<32}{:>12}{:>12}", "CPU usage in seconds", "total", "per block").blue(), + self.cpu_usage.iter().map(|v| v.to_string()).collect::<Vec<String>>().join("\n") ) } } + +#[derive(Debug, Serialize, Deserialize)] +pub struct ResourceUsage { + resource_name: String, + total: f64, + per_block: f64, +} + +impl std::fmt::Display for ResourceUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) + } +} diff --git a/polkadot/node/subsystem-bench/src/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/subsystem-bench.rs index 6f45214bc735..433354f6525d 100644 --- a/polkadot/node/subsystem-bench/src/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/subsystem-bench.rs @@ -100,6 +100,10 @@ struct BenchCli { /// Enable Cache Misses Profiling with Valgrind.
Linux only, Valgrind must be in the PATH pub cache_misses: bool, + #[clap(long, default_value_t = false)] + /// Shows the output in YAML format + pub yaml_output: bool, + #[command(subcommand)] pub objective: cli::TestObjective, } @@ -164,34 +168,51 @@ impl BenchCli { format!("Sequence contains {} step(s)", num_steps).bright_purple() ); for (index, test_config) in test_sequence.into_iter().enumerate() { + let benchmark_name = + format!("{} #{} {}", &options.path, index + 1, test_config.objective); gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); display_configuration(&test_config); - match test_config.objective { + let usage = match test_config.objective { TestObjective::DataAvailabilityRead(ref _opts) => { let mut state = TestState::new(&test_config); let (mut env, _protocol_config) = prepare_test(test_config, &mut state); env.runtime().block_on(availability::benchmark_availability_read( - &mut env, state, - )); + &benchmark_name, + &mut env, + state, + )) }, TestObjective::ApprovalVoting(ref options) => { let (mut env, state) = approval::prepare_test(test_config.clone(), options.clone()); - - env.runtime().block_on(bench_approvals(&mut env, state)); + env.runtime().block_on(bench_approvals( + &benchmark_name, + &mut env, + state, + )) }, TestObjective::DataAvailabilityWrite => { let mut state = TestState::new(&test_config); let (mut env, _protocol_config) = prepare_test(test_config, &mut state); env.runtime().block_on(availability::benchmark_availability_write( - &mut env, state, - )); + &benchmark_name, + &mut env, + state, + )) }, TestObjective::TestSequence(_) => todo!(), TestObjective::Unimplemented => todo!(), - } + }; + + let output = if self.yaml_output { + serde_yaml::to_string(&vec![usage])? + } else { + usage.to_string() + }; + println!("{}", output); } + return Ok(()) }, TestObjective::DataAvailabilityRead(ref _options) => self.create_test_configuration(), @@ -232,25 +253,28 @@ impl BenchCli { let mut state = TestState::new(&test_config); let (mut env, _protocol_config) = prepare_test(test_config, &mut state); - match self.objective { - TestObjective::DataAvailabilityRead(_options) => { - env.runtime() - .block_on(availability::benchmark_availability_read(&mut env, state)); - }, - TestObjective::DataAvailabilityWrite => { - env.runtime() - .block_on(availability::benchmark_availability_write(&mut env, state)); - }, - TestObjective::TestSequence(_options) => {}, + let benchmark_name = format!("{}", self.objective); + let usage = match self.objective { + TestObjective::DataAvailabilityRead(_options) => env.runtime().block_on( + availability::benchmark_availability_read(&benchmark_name, &mut env, state), + ), + TestObjective::DataAvailabilityWrite => env.runtime().block_on( + availability::benchmark_availability_write(&benchmark_name, &mut env, state), + ), + TestObjective::TestSequence(_options) => todo!(), TestObjective::ApprovalVoting(_) => todo!(), TestObjective::Unimplemented => todo!(), - } + }; if let Some(agent_running) = agent_running { let agent_ready = agent_running.stop()?; agent_ready.shutdown(); } + let output = + if self.yaml_output { serde_yaml::to_string(&vec![usage])? 
} else { usage.to_string() }; + println!("{}", output); + Ok(()) } } diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 000000000000..5320fa808b20 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,13 @@ +[toolchain] +# rustup has no subcommand (yet) for installing the toolchain in rust-toolchain.toml: +# https://github.com/rust-lang/rustup/issues/2686 +# The auto-installation behaviour in rustup will likely be removed: +# https://github.com/rust-lang/rustup/issues/1397 +channel = "1.75.0" +components = [ + "clippy", + "rust-analyzer", + "rust-src", +] +targets = ["wasm32-unknown-unknown"] +profile = "minimal" diff --git a/snowbridge-polkadot-sdk.patch b/snowbridge-polkadot-sdk.patch new file mode 100644 index 000000000000..d32636e2f074 --- /dev/null +++ b/snowbridge-polkadot-sdk.patch @@ -0,0 +1,1323 @@ +diff --git a/bridges/snowbridge/.gitignore b/bridges/snowbridge/.gitignore +new file mode 100644 +index 00000000000..2c1d07caf6f +--- /dev/null ++++ b/bridges/snowbridge/.gitignore +@@ -0,0 +1,17 @@ ++# Generated by Cargo ++# will have compiled files and executables ++**/target/ ++# These are backup files generated by rustfmt ++**/*.rs.bk ++ ++*.log ++rococo-local-raw.json ++rococo-local.json ++ ++.envrc ++ ++# The cache for chain data in container ++.local ++ ++# coverage reports ++cobertura.xml +diff --git a/bridges/snowbridge/Cargo.toml b/bridges/snowbridge/Cargo.toml +new file mode 100644 +index 00000000000..a9e9ed9abf9 +--- /dev/null ++++ b/bridges/snowbridge/Cargo.toml +@@ -0,0 +1,54 @@ ++[workspace.package] ++authors = ["Snowfork "] ++edition = "2021" ++repository = "https://github.com/snowfork/polkadot-sdk.git" ++ ++[workspace] ++resolver = "2" ++members = [ ++ "pallets/ethereum-client", ++ "pallets/ethereum-client/fixtures", ++ "pallets/inbound-queue", ++ "pallets/inbound-queue/fixtures", ++ "pallets/outbound-queue", ++ "pallets/outbound-queue/merkle-tree", ++ "pallets/outbound-queue/runtime-api", ++ "pallets/system", ++ "pallets/system/runtime-api", ++ "primitives/beacon", ++ "primitives/core", ++ "primitives/ethereum", ++ "primitives/router", ++ "runtime/runtime-common", ++ "runtime/test-common", ++] ++ ++ ++[workspace.lints.rust] ++suspicious_double_ref_op = { level = "allow", priority = 2 } ++ ++[workspace.lints.clippy] ++all = { level = "allow", priority = 0 } ++correctness = { level = "warn", priority = 1 } ++complexity = { level = "warn", priority = 1 } ++if-same-then-else = { level = "allow", priority = 2 } ++zero-prefixed-literal = { level = "allow", priority = 2 } # 00_1000_000 ++type_complexity = { level = "allow", priority = 2 } # raison d'etre ++nonminimal-bool = { level = "allow", priority = 2 } # maybe ++borrowed-box = { level = "allow", priority = 2 } # Reasonable to fix this one ++too-many-arguments = { level = "allow", priority = 2 } # (Turning this on would lead to) ++needless-lifetimes = { level = "allow", priority = 2 } # generated code ++unnecessary_cast = { level = "allow", priority = 2 } # Types may change ++identity-op = { level = "allow", priority = 2 } # One case where we do 0 + ++useless_conversion = { level = "allow", priority = 2 } # Types may change ++unit_arg = { level = "allow", priority = 2 } # stylistic ++option-map-unit-fn = { level = "allow", priority = 2 } # stylistic ++bind_instead_of_map = { level = "allow", priority = 2 } # stylistic ++erasing_op = { level = "allow", priority = 2 } # E.g. 0 * DOLLARS ++eq_op = { level = "allow", priority = 2 } # In tests we test equality. 
++while_immutable_condition = { level = "allow", priority = 2 } # false positives ++needless_option_as_deref = { level = "allow", priority = 2 } # false positives ++derivable_impls = { level = "allow", priority = 2 } # false positives ++stable_sort_primitive = { level = "allow", priority = 2 } # prefer stable sort ++extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic ++default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic +diff --git a/bridges/snowbridge/README.md b/bridges/snowbridge/README.md +index 49b9c2eaf55..3e5f63098ac 100644 +--- a/bridges/snowbridge/README.md ++++ b/bridges/snowbridge/README.md +@@ -1,32 +1,40 @@ +-# Snowbridge +-[![codecov](https://codecov.io/gh/Snowfork/snowbridge/branch/main/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/snowbridge) ++# Snowbridge · ++[![codecov](https://codecov.io/gh/Snowfork/polkadot-sdk/branch/snowbridge/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/polkadot-sdk) + ![GitHub](https://img.shields.io/github/license/Snowfork/snowbridge) + + Snowbridge is a trustless bridge between Polkadot and Ethereum. For documentation, visit https://docs.snowbridge.network. + + ## Components + ++The Snowbridge project lives in two repositories: ++ ++- [Snowfork/polkadot-sdk](https://github.com/Snowfork/polkadot-sdk): The Snowbridge parachain and pallets live in ++a fork of the polkadot-sdk. Changes are eventually contributed back to ++[paritytech/polkadot-sdk](https://github.com/paritytech/polkadot-sdk) ++- [Snowfork/snowbridge](https://github.com/Snowfork/snowbridge): The rest of the Snowbridge components, like contracts, ++off-chain relayer, end-to-end tests and test-net setup code. ++ + ### Parachain + +-Polkadot parachain and our pallets. See [parachain/README.md](https://github.com/Snowfork/snowbridge/blob/main/parachain/README.md). ++Polkadot parachain and our pallets. See [README.md](https://github.com/Snowfork/polkadot-sdk/blob/snowbridge/bridges/snowbridge/README.md). + + ### Contracts + +-Ethereum contracts and unit tests. See [contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) ++Ethereum contracts and unit tests. See [Snowfork/snowbridge/contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) + + ### Relayer + + Off-chain relayer services for relaying messages between Polkadot and Ethereum. See +-[relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) ++[Snowfork/snowbridge/relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) + + ### Local Testnet + + Scripts to provision a local testnet, running the above services to bridge between local deployments of Polkadot and +-Ethereum. See [web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). ++Ethereum. See [Snowfork/snowbridge/web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). + + ### Smoke Tests + +-Integration tests for our local testnet. See [smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). ++Integration tests for our local testnet. See [Snowfork/snowbridge/smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). + + ## Development + +@@ -83,7 +91,7 @@ direnv allow + + ### Upgrading the Rust toolchain + +-Sometimes we would like to upgrade rust toolchain. 
First update `parachain/rust-toolchain.toml` as required and then ++Sometimes we would like to upgrade rust toolchain. First update `rust-toolchain.toml` as required and then + update `flake.lock` running + ```sh + nix flake lock --update-input rust-overlay +diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +index 2f76d5b8357..a30cb88c908 100644 +--- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml ++++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = "snowbridge-pallet-ethereum-client" + description = "Snowbridge Ethereum Client Pallet" +-version = "0.0.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition.workspace = true + repository.workspace = true +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/.gitignore b/bridges/snowbridge/pallets/ethereum-client/fuzz/.gitignore +new file mode 100644 +index 00000000000..2f940cc8b2e +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/.gitignore +@@ -0,0 +1,5 @@ ++target ++corpus ++artifacts ++coverage ++.idea +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/fuzz/Cargo.toml +new file mode 100644 +index 00000000000..2a4e91590c2 +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/Cargo.toml +@@ -0,0 +1,41 @@ ++[package] ++name = "snowbridge-ethereum-client-fuzz" ++version = "0.0.0" ++publish = false ++edition = "2021" ++ ++[package.metadata] ++cargo-fuzz = true ++ ++[dependencies] ++libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] } ++arbitrary = { version = "1", optional = true, features = ["derive"] } ++snowbridge-pallet-ethereum-client = { path = "..", features = ["fuzzing"] } ++snowbridge-beacon-primitives = { path = "../../../primitives/beacon" } ++hex-literal = "0.4.1" ++sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } ++ ++# Prevent this from interfering with workspaces ++[workspace] ++members = ["."] ++ ++[profile.release] ++debug = 1 ++ ++[[bin]] ++name = "fuzz_force_checkpoint" ++path = "fuzz_targets/fuzz_force_checkpoint.rs" ++test = false ++doc = false ++ ++[[bin]] ++name = "fuzz_submit" ++path = "fuzz_targets/fuzz_submit.rs" ++test = false ++doc = false ++ ++[[bin]] ++name = "fuzz_submit_execution_header" ++path = "fuzz_targets/fuzz_submit_execution_header.rs" ++test = false ++doc = false +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/README.md b/bridges/snowbridge/pallets/ethereum-client/fuzz/README.md +new file mode 100644 +index 00000000000..eeecf199ca0 +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/README.md +@@ -0,0 +1,21 @@ ++# Beacon client fuzz tests ++ ++This crate contains fuzz tests for the three beacon client extrinsics. ++ ++# Installation ++ ++``` ++cargo install cargo-fuzz ++``` ++ ++# Run tests ++ ++- Force Checkpoint: `cargo fuzz run fuzz_force_checkpoint -- -max_len=10000000000` ++- Submit: `cargo fuzz run fuzz_submit -- -max_len=10000000000` ++- Submit Execution Header: `cargo fuzz run fuzz_submit_execution_header -- -max_len=10000000000` ++ ++Note: `max-len` is necessary because the max input length is 4096 bytes. Some of our inputs are larger than this ++default value. When running the tests without an increased max len parameter, no fuzz data will be generated. 
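++ ++To bound a fuzzing session (for example in CI), libFuzzer's standard `-runs` flag can be appended to the commands above, e.g. `cargo fuzz run fuzz_submit -- -max_len=10000000000 -runs=100000` (the run count here is illustrative).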
++ ++The tests will keep running until a crash is found, so in our CI setup the number of runs is limited so that the ++test completes. +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_force_checkpoint.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_force_checkpoint.rs +new file mode 100644 +index 00000000000..2e3d7543f46 +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_force_checkpoint.rs +@@ -0,0 +1,16 @@ ++#![no_main] ++extern crate snowbridge_pallet_ethereum_client; ++ ++use libfuzzer_sys::fuzz_target; ++use snowbridge_pallet_ethereum_client::{mock::*, types::CheckpointUpdate}; ++use snowbridge_ethereum_beacon_client_fuzz::types::FuzzCheckpointUpdate; ++use std::convert::TryInto; ++ ++fuzz_target!(|input: FuzzCheckpointUpdate| { ++ new_tester().execute_with(|| { ++ let update: CheckpointUpdate = input.try_into().unwrap(); ++ let result = ++ EthereumBeaconClient::force_checkpoint(RuntimeOrigin::root(), Box::new(update)); ++ assert!(result.is_err()); ++ }); ++}); +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit.rs +new file mode 100644 +index 00000000000..79c69e2cb23 +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit.rs +@@ -0,0 +1,16 @@ ++#![no_main] ++extern crate snowbridge_pallet_ethereum_client; ++ ++use snowbridge_pallet_ethereum_client::{mock::*, types::Update}; ++use snowbridge_ethereum_beacon_client_fuzz::types::FuzzUpdate; ++use std::convert::TryInto; ++ ++use libfuzzer_sys::fuzz_target; ++ ++fuzz_target!(|input: FuzzUpdate| { ++ new_tester().execute_with(|| { ++ let update: Update = input.try_into().unwrap(); ++ let result = EthereumBeaconClient::submit(RuntimeOrigin::signed(1), Box::new(update)); ++ assert!(result.is_err()); ++ }); ++}); +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit_execution_header.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit_execution_header.rs +new file mode 100644 +index 00000000000..82f4ea97079 +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/fuzz_targets/fuzz_submit_execution_header.rs +@@ -0,0 +1,19 @@ ++#![no_main] ++extern crate snowbridge_pallet_ethereum_client; ++ ++use snowbridge_beacon_primitives::ExecutionHeaderUpdate; ++use snowbridge_pallet_ethereum_client::mock::*; ++use snowbridge_pallet_ethereum_client::types::FuzzExecutionHeaderUpdate; ++ ++use libfuzzer_sys::fuzz_target; ++ ++fuzz_target!(|input: FuzzExecutionHeaderUpdate| { ++ new_tester().execute_with(|| { ++ let update: ExecutionHeaderUpdate = input.try_into().unwrap(); ++ let result = EthereumBeaconClient::submit_execution_header( ++ RuntimeOrigin::signed(1), ++ Box::new(update), ++ ); ++ assert!(result.is_err()); ++ }); ++}); +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/src/impls.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/impls.rs +new file mode 100644 +index 00000000000..b216429fd3c +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/impls.rs +@@ -0,0 +1,220 @@ ++use crate::types::{ ++ FuzzAncestryProof, FuzzBeaconHeader, FuzzCheckpointUpdate, FuzzExecutionHeaderUpdate, ++ FuzzExecutionPayloadHeader, FuzzNextSyncCommitteeUpdate, FuzzSyncAggregate, FuzzSyncCommittee, ++ FuzzUpdate, ++}; ++use arbitrary::{Arbitrary, Result, Unstructured}; ++use libfuzzer_sys::arbitrary; ++use 
snowbridge_beacon_primitives::{ ++ types::BeaconHeader, updates::AncestryProof, ExecutionHeaderUpdate, ExecutionPayloadHeader, ++ PublicKey, ++}; ++use snowbridge_pallet_ethereum_client::types::{ ++ CheckpointUpdate, NextSyncCommitteeUpdate, SyncAggregate, SyncCommittee, Update, ++}; ++use sp_core::H256; ++use std::convert::TryInto; ++ ++impl TryFrom for Update { ++ type Error = String; ++ ++ fn try_from(other: FuzzUpdate) -> Result { ++ let next: Option = other ++ .next_sync_committee_update ++ .map(|fuzz_update| fuzz_update.try_into().unwrap()); ++ ++ Ok(Self { ++ attested_header: other.attested_header.clone().try_into().unwrap(), ++ sync_aggregate: other.sync_aggregate.try_into().unwrap(), ++ signature_slot: other.signature_slot, ++ next_sync_committee_update: next, ++ finalized_header: other.finalized_header.clone().try_into().unwrap(), ++ finality_branch: other ++ .finality_branch ++ .iter() ++ .map(|&hash| H256::from(hash)) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ block_roots_root: other.block_roots_root.into(), ++ block_roots_branch: other ++ .block_roots_branch ++ .iter() ++ .map(|&hash| H256::from(hash)) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ }) ++ } ++} ++ ++impl TryFrom for NextSyncCommitteeUpdate { ++ type Error = String; ++ ++ fn try_from(other: FuzzNextSyncCommitteeUpdate) -> Result { ++ Ok(Self { ++ next_sync_committee: other.next_sync_committee.try_into().unwrap(), ++ next_sync_committee_branch: other ++ .next_sync_committee_branch ++ .iter() ++ .map(|&hash| H256::from(hash)) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ }) ++ } ++} ++ ++impl TryFrom for SyncAggregate { ++ type Error = String; ++ ++ fn try_from(other: FuzzSyncAggregate) -> Result { ++ Ok(Self { ++ sync_committee_bits: other.sync_committee_bits.into(), ++ sync_committee_signature: other.sync_committee_signature.into(), ++ }) ++ } ++} ++ ++impl TryFrom for CheckpointUpdate { ++ type Error = String; ++ ++ fn try_from(other: FuzzCheckpointUpdate) -> Result { ++ Ok(Self { ++ header: other.header.clone().try_into().unwrap(), ++ current_sync_committee: other.current_sync_committee.try_into().unwrap(), ++ current_sync_committee_branch: other ++ .current_sync_committee_branch ++ .iter() ++ .map(|&hash| H256::from(hash)) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ validators_root: other.validators_root.into(), ++ block_roots_root: other.block_roots_root.into(), ++ block_roots_branch: other ++ .block_roots_branch ++ .iter() ++ .map(|&hash| H256::from(hash)) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ }) ++ } ++} ++ ++impl TryFrom for SyncCommittee { ++ type Error = String; ++ ++ fn try_from(other: FuzzSyncCommittee) -> Result { ++ Ok(Self { ++ pubkeys: other ++ .pubkeys ++ .iter() ++ .map(|&pk| { pk.into() }) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ aggregate_pubkey: other.aggregate_pubkey.into(), ++ }) ++ } ++} ++ ++impl TryFrom for AncestryProof { ++ type Error = String; ++ ++ fn try_from(other: FuzzAncestryProof) -> Result { ++ Ok(Self { ++ header_branch: other ++ .header_branch ++ .iter() ++ .map(|&hash| H256::from(hash)) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ finalized_block_root: other.finalized_block_root.into(), ++ }) ++ } ++} ++ ++impl TryFrom for ExecutionPayloadHeader { ++ type Error = String; ++ ++ fn try_from(other: FuzzExecutionPayloadHeader) -> Result { ++ Ok(Self { ++ parent_hash: other.parent_hash.into(), ++ fee_recipient: other.fee_recipient.into(), 
++ state_root: other.state_root.into(), ++ receipts_root: other.receipts_root.into(), ++ logs_bloom: other.logs_bloom.into(), ++ prev_randao: other.prev_randao.into(), ++ block_number: other.block_number, ++ gas_limit: other.gas_limit, ++ gas_used: other.gas_used, ++ timestamp: other.timestamp, ++ extra_data: other.extra_data.into(), ++ base_fee_per_gas: other.base_fee_per_gas.into(), ++ block_hash: other.block_hash.into(), ++ transactions_root: other.transactions_root.into(), ++ withdrawals_root: other.withdrawals_root.into(), ++ }) ++ } ++} ++ ++impl TryFrom for BeaconHeader { ++ type Error = String; ++ ++ fn try_from(other: FuzzBeaconHeader) -> Result { ++ Ok(Self { ++ slot: other.slot, ++ proposer_index: other.proposer_index, ++ parent_root: other.parent_root.into(), ++ state_root: other.state_root.into(), ++ body_root: other.body_root.into(), ++ }) ++ } ++} ++ ++impl TryFrom for ExecutionHeaderUpdate { ++ type Error = String; ++ ++ fn try_from(other: FuzzExecutionHeaderUpdate) -> Result { ++ let ancestry_proof: Option = ++ other.ancestry_proof.map(|fuzz_update| fuzz_update.try_into().unwrap()); ++ ++ Ok(Self { ++ header: other.header.try_into().unwrap(), ++ ancestry_proof, ++ execution_header: other.execution_header.try_into().unwrap(), ++ execution_branch: other ++ .execution_branch ++ .iter() ++ .map(|&hash| H256::from(hash)) ++ .collect::>() ++ .as_slice() ++ .try_into() ++ .unwrap(), ++ }) ++ } ++} ++ ++// The arbitrary derive implementation is super slow for the pubkey set, so create a custom ++// impl for the sync committee. ++impl Arbitrary<'_> for FuzzSyncCommittee { ++ fn arbitrary(u: &mut Unstructured<'_>) -> Result { ++ let mut pubkeys = [[0u8; 48]; 32]; ++ ++ for i in 0..32 { ++ pubkeys[i] = <[u8; 48]>::arbitrary(u)?; ++ } ++ ++ Ok(FuzzSyncCommittee { pubkeys, aggregate_pubkey: <[u8; 48]>::arbitrary(u)? 
}) ++ } ++} +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/lib.rs +new file mode 100644 +index 00000000000..fb880c01820 +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/lib.rs +@@ -0,0 +1,2 @@ ++pub mod impls; ++pub mod types; +diff --git a/bridges/snowbridge/pallets/ethereum-client/fuzz/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/types.rs +new file mode 100644 +index 00000000000..37e5eb0d6c4 +--- /dev/null ++++ b/bridges/snowbridge/pallets/ethereum-client/fuzz/src/types.rs +@@ -0,0 +1,83 @@ ++use libfuzzer_sys::arbitrary; ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzCheckpointUpdate { ++ pub header: FuzzBeaconHeader, ++ pub current_sync_committee: FuzzSyncCommittee, ++ pub current_sync_committee_branch: Vec<[u8; 32]>, ++ pub validators_root: [u8; 32], ++ pub block_roots_root: [u8; 32], ++ pub block_roots_branch: Vec<[u8; 32]>, ++} ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzBeaconHeader { ++ pub slot: u64, ++ pub proposer_index: u64, ++ pub parent_root: [u8; 32], ++ pub state_root: [u8; 32], ++ pub body_root: [u8; 32], ++} ++ ++#[derive(Debug, Clone)] ++pub struct FuzzSyncCommittee { ++ pub pubkeys: [[u8; 48]; 32], ++ pub aggregate_pubkey: [u8; 48], ++} ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzUpdate { ++ pub attested_header: FuzzBeaconHeader, ++ pub sync_aggregate: FuzzSyncAggregate, ++ pub signature_slot: u64, ++ pub next_sync_committee_update: Option, ++ pub finalized_header: FuzzBeaconHeader, ++ pub finality_branch: Vec<[u8; 32]>, ++ pub block_roots_root: [u8; 32], ++ pub block_roots_branch: Vec<[u8; 32]>, ++} ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzSyncAggregate { ++ pub sync_committee_bits: [u8; 4], ++ pub sync_committee_signature: [u8; 96], ++} ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzNextSyncCommitteeUpdate { ++ pub next_sync_committee: FuzzSyncCommittee, ++ pub next_sync_committee_branch: Vec<[u8; 32]>, ++} ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzExecutionHeaderUpdate { ++ pub header: FuzzBeaconHeader, ++ pub ancestry_proof: Option, ++ pub execution_header: FuzzExecutionPayloadHeader, ++ pub execution_branch: Vec<[u8; 32]>, ++} ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzAncestryProof { ++ pub header_branch: Vec<[u8; 32]>, ++ pub finalized_block_root: [u8; 32], ++} ++ ++#[derive(arbitrary::Arbitrary, Debug, Clone)] ++pub struct FuzzExecutionPayloadHeader { ++ pub parent_hash: [u8; 32], ++ pub fee_recipient: [u8; 20], ++ pub state_root: [u8; 32], ++ pub receipts_root: [u8; 32], ++ pub logs_bloom: Vec, ++ pub prev_randao: [u8; 32], ++ pub block_number: u64, ++ pub gas_limit: u64, ++ pub gas_used: u64, ++ pub timestamp: u64, ++ pub extra_data: Vec, ++ pub base_fee_per_gas: [u8; 32], ++ pub block_hash: [u8; 32], ++ pub transactions_root: [u8; 32], ++ pub withdrawals_root: [u8; 32], ++} +diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +index 1081b162dda..dc853dc0a51 100644 +--- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml ++++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = "snowbridge-pallet-inbound-queue" + description = "Snowbridge Inbound Queue Pallet" +-version = "0.0.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition.workspace = true + 
+ repository.workspace = true
+diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
+index 110f611c676..d75c3c6a186 100644
+--- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
++++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
+@@ -88,6 +88,7 @@ impl pallet_balances::Config for Test {
+ 	type MaxFreezes = ();
+ 	type RuntimeHoldReason = ();
+ 	type RuntimeFreezeReason = ();
++	type MaxHolds = ();
+ }
+
+ parameter_types! {
+diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml
+index ae52fb3e5c4..03ddb12b7b4 100644
+--- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml
++++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml
+@@ -1,7 +1,7 @@
+ [package]
+ name = "snowbridge-pallet-outbound-queue"
+ description = "Snowbridge Outbound Queue Pallet"
+-version = "0.0.0"
++version = "0.9.0"
+ authors = ["Snowfork "]
+ edition.workspace = true
+ repository.workspace = true
+diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml
+index c185d5af706..503dc14bbc9 100644
+--- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml
++++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml
+@@ -1,7 +1,7 @@
+ [package]
+ name = "snowbridge-outbound-queue-merkle-tree"
+ description = "Snowbridge Outbound Queue Merkle Tree"
+-version = "0.1.1"
++version = "0.9.0"
+ authors = ["Snowfork "]
+ edition.workspace = true
+ repository.workspace = true
+diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml
+index 347b3bae493..0f0bfc4e358 100644
+--- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml
++++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml
+@@ -1,7 +1,7 @@
+ [package]
+ name = "snowbridge-outbound-queue-runtime-api"
+ description = "Snowbridge Outbound Queue Runtime API"
+-version = "0.0.0"
++version = "0.9.0"
+ authors = ["Snowfork "]
+ edition.workspace = true
+ repository.workspace = true
+diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml
+index aa600511633..f365b52e84d 100644
+--- a/bridges/snowbridge/pallets/system/Cargo.toml
++++ b/bridges/snowbridge/pallets/system/Cargo.toml
+@@ -1,7 +1,7 @@
+ [package]
+ name = "snowbridge-pallet-system"
+ description = "Snowbridge System Pallet"
+-version = "0.0.0"
++version = "0.9.0"
+ authors = ["Snowfork "]
+ edition.workspace = true
+ repository.workspace = true
+diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml
+index 355d2d29147..6d3735f776b 100644
+--- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml
++++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml
+@@ -1,7 +1,7 @@
+ [package]
+ name = "snowbridge-system-runtime-api"
+ description = "Snowbridge System Runtime API"
+-version = "0.0.0"
++version = "0.9.0"
+ authors = ["Snowfork "]
+ edition.workspace = true
+ repository.workspace = true
+diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs
+index b7f38fb753d..13abee14b4a 100644
+--- a/bridges/snowbridge/pallets/system/src/lib.rs
++++ b/bridges/snowbridge/pallets/system/src/lib.rs
+@@ -79,6 +79,8 @@ use xcm_executor::traits::ConvertLocation;
+ #[cfg(feature = "runtime-benchmarks")]
"runtime-benchmarks")] + use frame_support::traits::OriginTrait; + ++pub use pallet::*; ++ + pub type BalanceOf = + <::Token as Inspect<::AccountId>>::Balance; + pub type AccountIdOf = ::AccountId; +diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs +index edc3f141b07..bc229578132 100644 +--- a/bridges/snowbridge/pallets/system/src/mock.rs ++++ b/bridges/snowbridge/pallets/system/src/mock.rs +@@ -136,6 +136,7 @@ impl pallet_balances::Config for Test { + type MaxFreezes = (); + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); ++ type MaxHolds = (); + } + + impl pallet_xcm_origin::Config for Test { +diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml +index b59e5191b41..bb30eae33b1 100644 +--- a/bridges/snowbridge/primitives/beacon/Cargo.toml ++++ b/bridges/snowbridge/primitives/beacon/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = "snowbridge-beacon-primitives" + description = "Snowbridge Beacon Primitives" +-version = "0.0.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition.workspace = true + repository.workspace = true +diff --git a/bridges/snowbridge/primitives/beacon/src/types.rs b/bridges/snowbridge/primitives/beacon/src/types.rs +index 2af522f56b0..6f0886ba8b5 100644 +--- a/bridges/snowbridge/primitives/beacon/src/types.rs ++++ b/bridges/snowbridge/primitives/beacon/src/types.rs +@@ -586,7 +586,7 @@ pub mod deneb { + use sp_std::prelude::*; + + /// ExecutionPayloadHeader +- /// ++ /// https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#executionpayloadheader + #[derive( + Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, + )] +diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml +index f735d01a848..8df3ac21f4b 100644 +--- a/bridges/snowbridge/primitives/core/Cargo.toml ++++ b/bridges/snowbridge/primitives/core/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = "snowbridge-core" + description = "Snowbridge Core" +-version = "0.0.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition.workspace = true + repository.workspace = true +diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml +index 047ebdd5669..710a056daa5 100644 +--- a/bridges/snowbridge/primitives/ethereum/Cargo.toml ++++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = "snowbridge-ethereum" + description = "Snowbridge Ethereum" +-version = "0.1.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition.workspace = true + repository.workspace = true +diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml +index 712c60c2148..6c0d6aefed9 100644 +--- a/bridges/snowbridge/primitives/router/Cargo.toml ++++ b/bridges/snowbridge/primitives/router/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = "snowbridge-router-primitives" + description = "Snowbridge Router Primitives" +-version = "0.0.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition.workspace = true + repository.workspace = true +diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml +index f5b44b25585..12f9cfa42d9 100644 +--- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml ++++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = 
"snowbridge-runtime-common" + description = "Snowbridge Runtime Common" +-version = "0.0.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition.workspace = true + repository.workspace = true +diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml +index a2994e61891..7c02ac40b63 100644 +--- a/bridges/snowbridge/runtime/test-common/Cargo.toml ++++ b/bridges/snowbridge/runtime/test-common/Cargo.toml +@@ -1,7 +1,7 @@ + [package] + name = "snowbridge-runtime-test-common" + description = "Snowbridge Runtime Tests" +-version = "0.0.0" ++version = "0.9.0" + authors = ["Snowfork "] + edition = "2021" + license = "Apache-2.0" +diff --git a/bridges/snowbridge/rustfmt.toml b/bridges/snowbridge/rustfmt.toml +new file mode 100644 +index 00000000000..c3421539123 +--- /dev/null ++++ b/bridges/snowbridge/rustfmt.toml +@@ -0,0 +1,24 @@ ++# Basic ++edition = "2021" ++hard_tabs = true ++max_width = 100 ++use_small_heuristics = "Max" ++# Imports ++imports_granularity = "Crate" ++reorder_imports = true ++# Consistency ++newline_style = "Unix" ++# Misc ++chain_width = 80 ++spaces_around_ranges = false ++binop_separator = "Back" ++reorder_impl_items = false ++match_arm_leading_pipes = "Preserve" ++match_arm_blocks = false ++match_block_trailing_comma = true ++trailing_comma = "Vertical" ++trailing_semicolon = false ++use_field_init_shorthand = true ++# Format comments ++comment_width = 100 ++wrap_comments = true +diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh +new file mode 100755 +index 00000000000..8aa2d2a7035 +--- /dev/null ++++ b/bridges/snowbridge/scripts/contribute-upstream.sh +@@ -0,0 +1,80 @@ ++#!/bin/bash ++ ++# A script to cleanup the Snowfork fork of the polkadot-sdk to contribute it upstream back to parity/polkadot-sdk ++# ./bridges/snowbridge/scripts/contribute-upstream.sh ++ ++# show CLI help ++function show_help() { ++ set +x ++ echo " " ++ echo Error: $1 ++ echo "Usage:" ++ echo " ./bridges/snowbridge/scripts/contribute-upstream.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo" ++ exit 1 ++} ++ ++if [[ -z "$1" ]]; then ++ echo "Please provide a branch name you would like your upstream branch to be named" ++ exit 1 ++fi ++ ++branch_name=$1 ++ ++set -eux ++ ++# let's avoid any restrictions on where this script can be called for - snowbridge repo may be ++# plugged into any other repo folder. So the script (and other stuff that needs to be removed) ++# may be located either in call dir, or one of it subdirs. ++SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../" ++ ++# Get the current Git branch name ++current_branch=$(git rev-parse --abbrev-ref HEAD) ++ ++if [ "$current_branch" = "$branch_name" ] || git branch | grep -q "$branch_name"; then ++ echo "Already on requested branch or branch exists, not creating." 
++else
++    git branch "$branch_name"
++fi
++
++git checkout "$branch_name"
++
++# remove everything we think is not required for our needs
++rm -rf rust-toolchain.toml
++rm -rf $SNOWBRIDGE_FOLDER/.cargo
++rm -rf $SNOWBRIDGE_FOLDER/.github
++rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md
++rm -rf $SNOWBRIDGE_FOLDER/.gitignore
++rm -rf $SNOWBRIDGE_FOLDER/templates
++rm -rf $SNOWBRIDGE_FOLDER/pallets/ethereum-client/fuzz
++
++pushd $SNOWBRIDGE_FOLDER
++
++# let's test if everything we need compiles
++cargo check -p snowbridge-pallet-ethereum-client
++cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks
++cargo check -p snowbridge-pallet-ethereum-client --features try-runtime
++cargo check -p snowbridge-pallet-inbound-queue
++cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks
++cargo check -p snowbridge-pallet-inbound-queue --features try-runtime
++cargo check -p snowbridge-pallet-outbound-queue
++cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks
++cargo check -p snowbridge-pallet-outbound-queue --features try-runtime
++cargo check -p snowbridge-pallet-system
++cargo check -p snowbridge-pallet-system --features runtime-benchmarks
++cargo check -p snowbridge-pallet-system --features try-runtime
++
++# we're removing the lock file after all checks are done. Otherwise we may use different
++# Substrate/Polkadot/Cumulus commits and our checks will fail
++rm -f $SNOWBRIDGE_FOLDER/Cargo.toml
++rm -f $SNOWBRIDGE_FOLDER/Cargo.lock
++
++popd
++
++# Replace Parity's CI files that we have overwritten in our fork, to run our own CI
++rm -rf .github
++git remote -v | grep -w parity || git remote add parity https://github.com/paritytech/polkadot-sdk
++git fetch parity master
++git checkout parity/master -- .github
++git add -- .github
++
++echo "OK"
+diff --git a/bridges/snowbridge/templates/benchmarking-fixtures.mustache b/bridges/snowbridge/templates/benchmarking-fixtures.mustache
+new file mode 100644
+index 00000000000..abeab79a532
+--- /dev/null
++++ b/bridges/snowbridge/templates/benchmarking-fixtures.mustache
+@@ -0,0 +1,179 @@
++// Generated, do not edit!
++// See README.md for instructions to generate
++use crate::{
++	CheckpointUpdate, ExecutionHeaderUpdate, Update,
++};
++use hex_literal::hex;
++use primitives::{
++	types::deneb, updates::AncestryProof, BeaconHeader, NextSyncCommitteeUpdate, SyncAggregate,
++	SyncCommittee, VersionedExecutionPayloadHeader,
++};
++use sp_core::U256;
++use sp_std::{boxed::Box, vec};
++
++pub fn make_checkpoint() -> Box<CheckpointUpdate> {
++	Box::new(CheckpointUpdate {
++		header: BeaconHeader {
++			slot: {{CheckpointUpdate.Header.Slot}},
++			proposer_index: {{CheckpointUpdate.Header.ProposerIndex}},
++			parent_root: hex!("{{CheckpointUpdate.Header.ParentRoot}}").into(),
++			state_root: hex!("{{CheckpointUpdate.Header.StateRoot}}").into(),
++			body_root: hex!("{{CheckpointUpdate.Header.BodyRoot}}").into(),
++		},
++		current_sync_committee: SyncCommittee {
++			pubkeys: [
++				{{#CheckpointUpdate.CurrentSyncCommittee.Pubkeys}}
++				hex!("{{.}}").into(),
++				{{/CheckpointUpdate.CurrentSyncCommittee.Pubkeys}}
++			],
++			aggregate_pubkey: hex!("{{CheckpointUpdate.CurrentSyncCommittee.AggregatePubkey}}").into(),
++		},
++		current_sync_committee_branch: vec![
++			{{#CheckpointUpdate.CurrentSyncCommitteeBranch}}
++			hex!("{{.}}").into(),
++			{{/CheckpointUpdate.CurrentSyncCommitteeBranch}}
++		],
++		validators_root: hex!("{{CheckpointUpdate.ValidatorsRoot}}").into(),
++		block_roots_root: hex!("{{CheckpointUpdate.BlockRootsRoot}}").into(),
++		block_roots_branch: vec![
++			{{#CheckpointUpdate.BlockRootsBranch}}
++			hex!("{{.}}").into(),
++			{{/CheckpointUpdate.BlockRootsBranch}}
++		],
++	})
++}
++
++pub fn make_sync_committee_update() -> Box<Update> {
++	Box::new(Update {
++		attested_header: BeaconHeader {
++			slot: {{SyncCommitteeUpdate.AttestedHeader.Slot}},
++			proposer_index: {{SyncCommitteeUpdate.AttestedHeader.ProposerIndex}},
++			parent_root: hex!("{{SyncCommitteeUpdate.AttestedHeader.ParentRoot}}").into(),
++			state_root: hex!("{{SyncCommitteeUpdate.AttestedHeader.StateRoot}}").into(),
++			body_root: hex!("{{SyncCommitteeUpdate.AttestedHeader.BodyRoot}}").into(),
++		},
++		sync_aggregate: SyncAggregate{
++			sync_committee_bits: hex!("{{SyncCommitteeUpdate.SyncAggregate.SyncCommitteeBits}}"),
++			sync_committee_signature: hex!("{{SyncCommitteeUpdate.SyncAggregate.SyncCommitteeSignature}}").into(),
++		},
++		signature_slot: {{SyncCommitteeUpdate.SignatureSlot}},
++		next_sync_committee_update: Some(NextSyncCommitteeUpdate {
++			next_sync_committee: SyncCommittee {
++				pubkeys: [
++					{{#SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommittee.Pubkeys}}
++					hex!("{{.}}").into(),
++					{{/SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommittee.Pubkeys}}
++				],
++				aggregate_pubkey: hex!("{{SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommittee.AggregatePubkey}}").into(),
++			},
++			next_sync_committee_branch: vec![
++				{{#SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommitteeBranch}}
++				hex!("{{.}}").into(),
++				{{/SyncCommitteeUpdate.NextSyncCommitteeUpdate.NextSyncCommitteeBranch}}
++			],
++		}),
++		finalized_header: BeaconHeader{
++			slot: {{SyncCommitteeUpdate.FinalizedHeader.Slot}},
++			proposer_index: {{SyncCommitteeUpdate.FinalizedHeader.ProposerIndex}},
++			parent_root: hex!("{{SyncCommitteeUpdate.FinalizedHeader.ParentRoot}}").into(),
++			state_root: hex!("{{SyncCommitteeUpdate.FinalizedHeader.StateRoot}}").into(),
++			body_root: hex!("{{SyncCommitteeUpdate.FinalizedHeader.BodyRoot}}").into(),
++		},
++		finality_branch: vec![
++			{{#SyncCommitteeUpdate.FinalityBranch}}
++			hex!("{{.}}").into(),
++			{{/SyncCommitteeUpdate.FinalityBranch}}
++		],
++		block_roots_root: hex!("{{SyncCommitteeUpdate.BlockRootsRoot}}").into(),
++		block_roots_branch: vec![
++			{{#SyncCommitteeUpdate.BlockRootsBranch}}
++			hex!("{{.}}").into(),
++			{{/SyncCommitteeUpdate.BlockRootsBranch}}
++		],
++	})
++}
++
++pub fn make_finalized_header_update() -> Box<Update> {
++	Box::new(Update {
++		attested_header: BeaconHeader {
++			slot: {{FinalizedHeaderUpdate.AttestedHeader.Slot}},
++			proposer_index: {{FinalizedHeaderUpdate.AttestedHeader.ProposerIndex}},
++			parent_root: hex!("{{FinalizedHeaderUpdate.AttestedHeader.ParentRoot}}").into(),
++			state_root: hex!("{{FinalizedHeaderUpdate.AttestedHeader.StateRoot}}").into(),
++			body_root: hex!("{{FinalizedHeaderUpdate.AttestedHeader.BodyRoot}}").into(),
++		},
++		sync_aggregate: SyncAggregate{
++			sync_committee_bits: hex!("{{FinalizedHeaderUpdate.SyncAggregate.SyncCommitteeBits}}"),
++			sync_committee_signature: hex!("{{FinalizedHeaderUpdate.SyncAggregate.SyncCommitteeSignature}}").into(),
++		},
++		signature_slot: {{FinalizedHeaderUpdate.SignatureSlot}},
++		next_sync_committee_update: None,
++		finalized_header: BeaconHeader {
++			slot: {{FinalizedHeaderUpdate.FinalizedHeader.Slot}},
++			proposer_index: {{FinalizedHeaderUpdate.FinalizedHeader.ProposerIndex}},
++			parent_root: hex!("{{FinalizedHeaderUpdate.FinalizedHeader.ParentRoot}}").into(),
++			state_root: hex!("{{FinalizedHeaderUpdate.FinalizedHeader.StateRoot}}").into(),
++			body_root: hex!("{{FinalizedHeaderUpdate.FinalizedHeader.BodyRoot}}").into(),
++		},
++		finality_branch: vec![
++			{{#FinalizedHeaderUpdate.FinalityBranch}}
++			hex!("{{.}}").into(),
++			{{/FinalizedHeaderUpdate.FinalityBranch}}
++		],
++		block_roots_root: hex!("{{FinalizedHeaderUpdate.BlockRootsRoot}}").into(),
++		block_roots_branch: vec![
++			{{#FinalizedHeaderUpdate.BlockRootsBranch}}
++			hex!("{{.}}").into(),
++			{{/FinalizedHeaderUpdate.BlockRootsBranch}}
++		]
++	})
++}
++
++pub fn make_execution_header_update() -> Box<ExecutionHeaderUpdate> {
++	Box::new(ExecutionHeaderUpdate {
++		header: BeaconHeader {
++			slot: {{HeaderUpdate.Header.Slot}},
++			proposer_index: {{HeaderUpdate.Header.ProposerIndex}},
++			parent_root: hex!("{{HeaderUpdate.Header.ParentRoot}}").into(),
++			state_root: hex!("{{HeaderUpdate.Header.StateRoot}}").into(),
++			body_root: hex!("{{HeaderUpdate.Header.BodyRoot}}").into(),
++		},
++		{{#HeaderUpdate.AncestryProof}}
++		ancestry_proof: Some(AncestryProof {
++			header_branch: vec![
++				{{#HeaderUpdate.AncestryProof.HeaderBranch}}
++				hex!("{{.}}").into(),
++				{{/HeaderUpdate.AncestryProof.HeaderBranch}}
++			],
++			finalized_block_root: hex!("{{HeaderUpdate.AncestryProof.FinalizedBlockRoot}}").into(),
++		}),
++		{{/HeaderUpdate.AncestryProof}}
++		{{^HeaderUpdate.AncestryProof}}
++		ancestry_proof: None,
++		{{/HeaderUpdate.AncestryProof}}
++		execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader {
++			parent_hash: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.ParentHash}}").into(),
++			fee_recipient: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.FeeRecipient}}").into(),
++			state_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.StateRoot}}").into(),
++			receipts_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.ReceiptsRoot}}").into(),
++			logs_bloom: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.LogsBloom}}").into(),
++			prev_randao: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.PrevRandao}}").into(),
++			block_number: {{HeaderUpdate.ExecutionHeader.Deneb.BlockNumber}},
++			gas_limit: {{HeaderUpdate.ExecutionHeader.Deneb.GasLimit}},
++			gas_used: {{HeaderUpdate.ExecutionHeader.Deneb.GasUsed}},
++			timestamp: {{HeaderUpdate.ExecutionHeader.Deneb.Timestamp}},
++			extra_data: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.ExtraData}}").into(),
++			base_fee_per_gas: U256::from({{HeaderUpdate.ExecutionHeader.Deneb.BaseFeePerGas}}u64),
++			block_hash: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.BlockHash}}").into(),
++			transactions_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.TransactionRoot}}").into(),
++			withdrawals_root: hex!("{{HeaderUpdate.ExecutionHeader.Deneb.WithdrawalsRoot}}").into(),
++			blob_gas_used: {{HeaderUpdate.ExecutionHeader.Deneb.BlobGasUsed}},
++			excess_blob_gas: {{HeaderUpdate.ExecutionHeader.Deneb.ExcessBlobGas}},
++		}),
++		execution_branch: vec![
++			{{#HeaderUpdate.ExecutionBranch}}
++			hex!("{{.}}").into(),
++			{{/HeaderUpdate.ExecutionBranch}}
++		],
++	})
++}
+diff --git a/bridges/snowbridge/templates/module-weight-template.hbs b/bridges/snowbridge/templates/module-weight-template.hbs
+new file mode 100644
+index 00000000000..5919a7cc7c1
+--- /dev/null
++++ b/bridges/snowbridge/templates/module-weight-template.hbs
+@@ -0,0 +1,74 @@
++{{header}}
++//! Autogenerated weights for `{{pallet}}`
++//!
++//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
++//! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}`
++//! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}`
++//! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}`
++//! WASM-EXECUTION: `{{cmd.wasm_execution}}`, CHAIN: `{{cmd.chain}}`, DB CACHE: `{{cmd.db_cache}}`
++
++// Executed Command:
++{{#each args as |arg|}}
++// {{arg}}
++{{/each}}
++
++#![cfg_attr(rustfmt, rustfmt_skip)]
++#![allow(unused_parens)]
++#![allow(unused_imports)]
++#![allow(missing_docs)]
++
++use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
++use core::marker::PhantomData;
++
++/// Weight functions needed for `{{pallet}}`.
++pub trait WeightInfo {
++	{{#each benchmarks as |benchmark|}}
++	fn {{benchmark.name~}}
++	(
++	{{~#each benchmark.components as |c| ~}}
++	{{c.name}}: u32, {{/each~}}
++	) -> Weight;
++	{{/each}}
++}
++
++// For backwards compatibility and tests.
++impl WeightInfo for () {
++	{{#each benchmarks as |benchmark|}}
++	{{#each benchmark.comments as |comment|}}
++	/// {{comment}}
++	{{/each}}
++	{{#each benchmark.component_ranges as |range|}}
++	/// The range of component `{{range.name}}` is `[{{range.min}}, {{range.max}}]`.
++	{{/each}}
++	fn {{benchmark.name~}}
++	(
++	{{~#each benchmark.components as |c| ~}}
++	{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
++	) -> Weight {
++		// Proof Size summary in bytes:
++		// Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}`
++		// Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}`
++		// Minimum execution time: {{underscore benchmark.min_execution_time}}_000 picoseconds.
++		Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}})
++		{{#each benchmark.component_weight as |cw|}}
++		// Standard Error: {{underscore cw.error}}
++		.saturating_add(Weight::from_parts({{underscore cw.slope}}, 0).saturating_mul({{cw.name}}.into()))
++		{{/each}}
++		{{#if (ne benchmark.base_reads "0")}}
++		.saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}}_u64))
++		{{/if}}
++		{{#each benchmark.component_reads as |cr|}}
++		.saturating_add(RocksDbWeight::get().reads(({{cr.slope}}_u64).saturating_mul({{cr.name}}.into())))
++		{{/each}}
++		{{#if (ne benchmark.base_writes "0")}}
++		.saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}}_u64))
++		{{/if}}
++		{{#each benchmark.component_writes as |cw|}}
++		.saturating_add(RocksDbWeight::get().writes(({{cw.slope}}_u64).saturating_mul({{cw.name}}.into())))
++		{{/each}}
++		{{#each benchmark.component_calculated_proof_size as |cp|}}
++		.saturating_add(Weight::from_parts(0, {{cp.slope}}).saturating_mul({{cp.name}}.into()))
++		{{/each}}
++	}
++	{{/each}}
++}
+diff --git a/bridges/snowbridge/templates/register_token.mustache b/bridges/snowbridge/templates/register_token.mustache
+new file mode 100644
+index 00000000000..e8d35be42ec
+--- /dev/null
++++ b/bridges/snowbridge/templates/register_token.mustache
+@@ -0,0 +1,45 @@
++// SPDX-License-Identifier: Apache-2.0
++// SPDX-FileCopyrightText: 2023 Snowfork
++// Generated, do not edit!
++// See ethereum client README.md for instructions to generate
++
++use crate::InboundQueueFixture;
++use hex_literal::hex;
++use snowbridge_beacon_primitives::CompactExecutionHeader;
++use snowbridge_core::inbound::{Log, Message, Proof};
++use sp_std::vec;
++
++pub fn make_register_token_message() -> InboundQueueFixture {
++	InboundQueueFixture {
++		execution_header: CompactExecutionHeader{
++			parent_hash: hex!("{{InboundMessageTest.ExecutionHeader.ParentHash}}").into(),
++			block_number: {{InboundMessageTest.ExecutionHeader.BlockNumber}},
++			state_root: hex!("{{InboundMessageTest.ExecutionHeader.StateRoot}}").into(),
++			receipts_root: hex!("{{InboundMessageTest.ExecutionHeader.ReceiptsRoot}}").into(),
++		},
++		message: Message {
++			event_log: Log {
++				address: hex!("{{InboundMessageTest.Message.EventLog.Address}}").into(),
++				topics: vec![
++					{{#InboundMessageTest.Message.EventLog.Topics}}
++					hex!("{{.}}").into(),
++					{{/InboundMessageTest.Message.EventLog.Topics}}
++				],
++				data: hex!("{{InboundMessageTest.Message.EventLog.Data}}").into(),
++			},
++			proof: Proof {
++				block_hash: hex!("{{InboundMessageTest.Message.Proof.BlockHash}}").into(),
++				tx_index: {{InboundMessageTest.Message.Proof.TxIndex}},
++				data: (vec![
++					{{#InboundMessageTest.Message.Proof.Data.Keys}}
++					hex!("{{.}}").to_vec(),
++					{{/InboundMessageTest.Message.Proof.Data.Keys}}
++				], vec![
++					{{#InboundMessageTest.Message.Proof.Data.Values}}
++					hex!("{{.}}").to_vec(),
++					{{/InboundMessageTest.Message.Proof.Data.Values}}
++				]),
++			},
++		},
++	}
++}
+diff --git a/bridges/snowbridge/templates/send_token.mustache b/bridges/snowbridge/templates/send_token.mustache
+new file mode 100644
+index 00000000000..e104d2da279
+--- /dev/null
++++ b/bridges/snowbridge/templates/send_token.mustache
+@@ -0,0 +1,45 @@
++// SPDX-License-Identifier: Apache-2.0
++// SPDX-FileCopyrightText: 2023 Snowfork
++// Generated, do not edit!
++// See ethereum client README.md for instructions to generate
++
++use crate::InboundQueueFixture;
++use hex_literal::hex;
++use snowbridge_beacon_primitives::CompactExecutionHeader;
++use snowbridge_core::inbound::{Log, Message, Proof};
++use sp_std::vec;
++
++pub fn make_send_token_message() -> InboundQueueFixture {
++	InboundQueueFixture {
++		execution_header: CompactExecutionHeader{
++			parent_hash: hex!("{{InboundMessageTest.ExecutionHeader.ParentHash}}").into(),
++			block_number: {{InboundMessageTest.ExecutionHeader.BlockNumber}},
++			state_root: hex!("{{InboundMessageTest.ExecutionHeader.StateRoot}}").into(),
++			receipts_root: hex!("{{InboundMessageTest.ExecutionHeader.ReceiptsRoot}}").into(),
++		},
++		message: Message {
++			event_log: Log {
++				address: hex!("{{InboundMessageTest.Message.EventLog.Address}}").into(),
++				topics: vec![
++					{{#InboundMessageTest.Message.EventLog.Topics}}
++					hex!("{{.}}").into(),
++					{{/InboundMessageTest.Message.EventLog.Topics}}
++				],
++				data: hex!("{{InboundMessageTest.Message.EventLog.Data}}").into(),
++			},
++			proof: Proof {
++				block_hash: hex!("{{InboundMessageTest.Message.Proof.BlockHash}}").into(),
++				tx_index: {{InboundMessageTest.Message.Proof.TxIndex}},
++				data: (vec![
++					{{#InboundMessageTest.Message.Proof.Data.Keys}}
++					hex!("{{.}}").to_vec(),
++					{{/InboundMessageTest.Message.Proof.Data.Keys}}
++				], vec![
++					{{#InboundMessageTest.Message.Proof.Data.Values}}
++					hex!("{{.}}").to_vec(),
++					{{/InboundMessageTest.Message.Proof.Data.Values}}
++				]),
++			},
++		},
++	}
++}
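
For reference, here is a minimal sketch of how a libfuzzer harness might consume the fuzz types and TryFrom conversions added in this patch. It is illustrative only: the crate path `ethereum_client_fuzz` and the import of `ExecutionHeaderUpdate` from `snowbridge_beacon_primitives` are assumptions, not taken from the diff; only `FuzzExecutionHeaderUpdate` and its conversion appear in the patch itself.

    // Hypothetical fuzz target (not part of the diff); crate and import paths
    // are assumed for illustration.
    #![no_main]

    use ethereum_client_fuzz::types::FuzzExecutionHeaderUpdate;
    use libfuzzer_sys::fuzz_target;
    use snowbridge_beacon_primitives::ExecutionHeaderUpdate;

    fuzz_target!(|input: FuzzExecutionHeaderUpdate| {
        // Convert the Arbitrary-generated input into the pallet's update type.
        // Note the TryFrom impls above unwrap internally, so a branch vector of
        // the wrong length surfaces as a panic rather than an Err.
        let update: Result<ExecutionHeaderUpdate, String> = input.try_into();
        // A fuller harness would submit a successfully converted update to the
        // pallet inside a mock runtime and assert that verification never panics.
        let _ = update;
    });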