From 49a68132882e58872411c5c0278b13a008b3682b Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Thu, 5 Sep 2024 14:57:58 +0100 Subject: [PATCH 01/66] GHA Migration - test-misc (#5385) Closes: https://github.com/paritytech/ci_cd/issues/1018 ![image](https://github.com/user-attachments/assets/b434d809-2c38-47e9-8a62-448f32cf24cb) Added DAG similar to how it was on Gitlab --------- Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> --- .../scripts}/check-each-crate.py | 10 +- .github/workflows/tests-misc.yml | 413 ++++++++++++++++++ .github/workflows/tests.yml | 9 +- .gitlab-ci.yml | 87 +--- .gitlab/pipeline/test.yml | 294 ------------- 5 files changed, 438 insertions(+), 375 deletions(-) rename {.gitlab => .github/scripts}/check-each-crate.py (81%) create mode 100644 .github/workflows/tests-misc.yml diff --git a/.gitlab/check-each-crate.py b/.github/scripts/check-each-crate.py similarity index 81% rename from .gitlab/check-each-crate.py rename to .github/scripts/check-each-crate.py index 9b654f8071ac..7a53e812ddfc 100755 --- a/.gitlab/check-each-crate.py +++ b/.github/scripts/check-each-crate.py @@ -9,6 +9,7 @@ # # - `target_group`: Integer starting from 1, the group this script should execute. # - `groups_total`: Integer starting from 1, total number of groups. +# - `disable_forklift`: Boolean, whether to disable forklift or not. import subprocess, sys @@ -31,6 +32,9 @@ target_group = int(sys.argv[1]) - 1 groups_total = int(sys.argv[2]) +disable_forklift = bool(sys.argv[3] if len(sys.argv) > 3 else False) + +print(f"Target group: {target_group}, Total groups: {groups_total}, Disable forklift: {disable_forklift}", file=sys.stderr) if len(crates) == 0: print("No crates detected!", file=sys.stderr) @@ -55,7 +59,11 @@ print(f"Checking {crates[crate][0]}", file=sys.stderr) - res = subprocess.run(["forklift", "cargo", "check", "--locked"], cwd = crates[crate][1]) + cmd = ["cargo", "check", "--locked"] + + cmd.insert(0, 'forklift') if not disable_forklift else None + + res = subprocess.run(cmd, cwd = crates[crate][1]) if res.returncode != 0: sys.exit(1) diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml new file mode 100644 index 000000000000..824e8c11c2a4 --- /dev/null +++ b/.github/workflows/tests-misc.yml @@ -0,0 +1,413 @@ +name: tests misc + +on: + push: + branches: + - master + pull_request: + types: [ opened, synchronize, reopened, ready_for_review ] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +# Jobs in this workflow depend on each other, only for limiting peak amount of spawned workers + +jobs: + #changes: + # permissions: + # pull-requests: read + # uses: ./.github/workflows/reusable-check-changed-files.yml + + set-image: + # needs: [ changes ] + # if: needs.changes.outputs.rust || needs.changes.outputs.current-workflow + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + RUNNER: ${{ steps.set_runner.outputs.RUNNER }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + # By default, we use spot machines that can be terminated at any time. 
+ # Merge queues use persistent runners to avoid kicking off from queue when the runner is terminated. + - id: set_runner + run: | + # Run merge queues on persistent runners + if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then + echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT + else + echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT + fi + + # more information about this job can be found here: + # https://github.com/paritytech/substrate/pull/3778 + test-full-crypto-feature: + needs: [ set-image ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + timeout-minutes: 60 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions" + RUST_BACKTRACE: 1 + steps: + - name: Checkout + uses: actions/checkout@v4.1.7 + + - name: script + run: | + cd substrate/primitives/core/ + forklift cargo build --locked --no-default-features --features full_crypto + cd ../application-crypto + forklift cargo build --locked --no-default-features --features full_crypto + + test-frame-examples-compile-to-wasm: + timeout-minutes: 20 + # into one job + needs: [ set-image, test-full-crypto-feature ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions" + RUST_BACKTRACE: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: | + cd substrate/frame/examples/offchain-worker/ + forklift cargo build --locked --target=wasm32-unknown-unknown --no-default-features + cd ../basic + forklift cargo build --locked --target=wasm32-unknown-unknown --no-default-features + + test-frame-ui: + timeout-minutes: 60 + needs: [ set-image, test-frame-examples-compile-to-wasm ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: 1 + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + # Ensure we run the UI tests. + RUN_UI_TESTS: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: | + forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental + forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental + forklift cargo test --locked -q --profile testnet -p xcm-procedural + forklift cargo test --locked -q --profile testnet -p frame-election-provider-solution-type + forklift cargo test --locked -q --profile testnet -p sp-api-test + # There is multiple version of sp-runtime-interface in the repo. So we point to the manifest. 
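+          # (--manifest-path resolves the build from that crate's own Cargo.toml,
+          # so exactly one copy of the crate is selected for the test run.)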
+ forklift cargo test --locked -q --profile testnet --manifest-path substrate/primitives/runtime-interface/Cargo.toml + + test-deterministic-wasm: + timeout-minutes: 20 + needs: [ set-image, test-frame-ui ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + WASM_BUILD_NO_COLOR: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: | + # build runtime + forklift cargo build -q --locked --release -p westend-runtime -p rococo-runtime + # make checksum + sha256sum target/release/wbuild/*-runtime/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256 + cargo clean + # build again + forklift cargo build -q --locked --release -p westend-runtime -p rococo-runtime + # confirm checksum + sha256sum -c checksum.sha256 + + cargo-check-benches-branches: + needs: [ set-image ] + if: ${{ github.event_name == 'pull_request' }} + timeout-minutes: 60 + outputs: + branch: ${{ steps.branch.outputs.branch }} + runs-on: ubuntu-latest + steps: + - name: Branch + id: branch + run: | + echo "branch=['${{ github.base_ref }}', '${{ github.head_ref }}']" >> $GITHUB_OUTPUT + + cargo-check-benches: + needs: [ set-image, cargo-check-benches-branches ] + timeout-minutes: 60 + strategy: + matrix: + branch: ${{ fromJSON(needs.cargo-check-benches-branches.outputs.branch) }} + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ matrix.branch }} + + - name: script + run: | + ARTIFACTS_DIR=./artifacts + BENCH_TRIE_READ=::trie::read::small + BENCH_NODE_IMPORT=::node::import::sr25519::transfer_keep_alive::paritydb::small + mkdir -p $ARTIFACTS_DIR + + SKIP_WASM_BUILD=1 forklift cargo check --locked --benches --all; + forklift cargo run --locked --release -p node-bench -- $BENCH_TRIE_READ --json | tee $ARTIFACTS_DIR/bench_trie_read_small.json; + forklift cargo run --locked --release -p node-bench -- $BENCH_NODE_IMPORT --json | tee $ARTIFACTS_DIR/bench_transfer_keep_alive.json + + - name: Upload artifacts + uses: actions/upload-artifact@v4.3.6 + with: + path: ./artifacts + name: cargo-check-benches-${{ matrix.branch }}-${{ github.sha }} + retention-days: 1 + + node-bench-regression-guard: + timeout-minutes: 20 + runs-on: arc-runners-polkadot-sdk + needs: [ set-image, cargo-check-benches ] + steps: + - name: Checkout + uses: actions/checkout@v4.1.7 + + - name: Download artifact (master run) + uses: actions/download-artifact@v4.1.8 + with: + name: cargo-check-benches-${{ github.base_ref }}-${{ github.sha }} + path: ./artifacts/master + + - name: Download artifact (current run) + uses: actions/download-artifact@v4.1.8 + with: + name: cargo-check-benches-${{ github.head_ref }}-${{ github.sha }} + path: ./artifacts/current + + - name: script + id: compare + run: | + docker run --rm \ + -v $PWD/artifacts/master:/artifacts/master \ + -v $PWD/artifacts/current:/artifacts/current \ + paritytech/node-bench-regression-guard:latest \ + node-bench-regression-guard --reference /artifacts/master --compare-with /artifacts/current + + if [ $? 
-ne 0 ]; then + FAILED_MSG='### node-bench-regression-guard failed ❌, check the regression in *cargo-check-benches* job' + echo $FAILED_MSG + echo $FAILED_MSG >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo "### node-bench-regression-guard passed ✅" >> $GITHUB_STEP_SUMMARY + fi + + test-node-metrics: + needs: [ set-image ] + timeout-minutes: 30 + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4.1.7 + + - name: Run tests + id: tests + env: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + run: | + forklift cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker --profile testnet --verbose --locked + mkdir -p ./artifacts + forklift cargo test --profile testnet --locked --features=runtime-metrics -p polkadot-node-metrics > ./artifacts/log.txt + echo "Metrics test passed" + + - name: Upload artifacts if failed + if: ${{ steps.tests.outcome != 'success' }} + uses: actions/upload-artifact@v4.3.6 + with: + name: node-metrics-failed + path: ./artifacts + + # more information about this job can be found here: + # https://github.com/paritytech/substrate/pull/6916 + check-tracing: + timeout-minutes: 20 + needs: [ set-image, test-node-metrics ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4.1.7 + + - name: script + run: | + forklift cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features + forklift cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features --features=with-tracing + + check-metadata-hash: + timeout-minutes: 20 + needs: [ set-image, check-tracing ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4.1.7 + + - name: script + run: | + forklift cargo build --locked -p westend-runtime --features metadata-hash + + cargo-hfuzz: + timeout-minutes: 20 + needs: [ set-image, check-metadata-hash ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + # max 10s per iteration, 60s per file + HFUZZ_RUN_ARGS: | + --exit_upon_crash + --exit_code_upon_crash 1 + --timeout 10 + --run_time 60 + + # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: + # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian + # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr + # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR + HFUZZ_BUILD_ARGS: | + --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" + --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" + steps: + - name: Checkout + uses: actions/checkout@v4.1.7 + + - name: Run honggfuzz + run: | + cd substrate/primitives/arithmetic/fuzzer + forklift cargo hfuzz build + for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); + do + forklift cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; + done + + - name: Upload artifacts + uses: actions/upload-artifact@v4.3.6 + with: + path: 
substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/ + name: hfuzz-${{ github.sha }} + + cargo-check-each-crate: + timeout-minutes: 140 + needs: [ set-image ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + RUSTFLAGS: "-D warnings" + CI_JOB_NAME: cargo-check-each-crate + strategy: + matrix: + index: [ 1,2,3,4,5,6,7 ] # 7 parallel jobs + steps: + - name: Checkout + uses: actions/checkout@v4.1.7 + + - name: Check Rust + run: | + rustup show + rustup +nightly show + + - name: script + run: | + mkdir -p /github/home/.forklift + cp .forklift/config.toml /github/home/.forklift/config.toml + PYTHONUNBUFFERED=x .github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} + + # TODO: enable when we have a macos Self-Hosted runners + # cargo-check-each-crate-macos: + # timeout-minutes: 120 + # needs: [ set-image ] + # runs-on: macos-latest + # env: + # RUSTFLAGS: "-D warnings" + # CI_JOB_NAME: cargo-check-each-crate + # IMAGE: ${{ needs.set-image.outputs.IMAGE }} + # strategy: + # fail-fast: false + # matrix: + # index: [ 1,2,3,4,5,6,7,8,9,10 ] # 10 parallel jobs + # steps: + # - name: Checkout + # uses: actions/checkout@v4.1.7 + # + # - run: | + # VERSION=$(echo $IMAGE | sed -E 's/.*:bullseye-([^-]+)-.*/\1/') + # echo $VERSION + # echo "VERSION=$VERSION" >> $GITHUB_ENV + # + # - run: | + # rustup install $VERSION + # rustup default $VERSION + # + # - name: Check Rust + # run: | + # rustup show + # rustup +nightly show + # + # - name: MacOS Deps + # run: | + # brew install protobuf openssl pkg-config zlib xz zstd llvm jq curl gcc make cmake + # rustup target add wasm32-unknown-unknown --toolchain $VERSION + # rustup component add rust-src rustfmt clippy --toolchain $VERSION + # + # - name: script + # run: | + # PYTHONUNBUFFERED=x .github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} True + + confirm-required-test-misc-jobs-passed: + runs-on: ubuntu-latest + name: All test misc tests passed + # If any new job gets added, be sure to add it to this array + needs: + - test-full-crypto-feature + - test-frame-examples-compile-to-wasm + - test-frame-ui + - cargo-check-benches + - node-bench-regression-guard + - test-node-metrics + - check-tracing + - cargo-check-each-crate + # - cargo-hfuzz remove from required for now, as it's flaky + steps: + - run: echo '### Good job! All the required tests passed 🚀' >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 25761fb94fd3..ed2ef07736b9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,7 +5,7 @@ on: branches: - master pull_request: - types: [opened, synchronize, reopened, ready_for_review] + types: [ opened, synchronize, reopened, ready_for_review ] merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -44,7 +44,7 @@ jobs: # This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors. 
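  # (The run is a smoke test only; the weights it produces are not recorded.)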
quick-benchmarks: - needs: [set-image] + needs: [ set-image ] # if: ${{ needs.changes.outputs.rust }} runs-on: ${{ needs.set-image.outputs.RUNNER }} timeout-minutes: 60 @@ -63,7 +63,7 @@ jobs: # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: - needs: [set-image] + needs: [ set-image ] # if: ${{ needs.changes.outputs.rust }} runs-on: ${{ needs.set-image.outputs.RUNNER }} timeout-minutes: 60 @@ -87,8 +87,9 @@ jobs: run: | echo "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed." >> $GITHUB_STEP_SUMMARY + cargo-check-all-benches: - needs: [set-image] + needs: [ set-image ] # if: ${{ needs.changes.outputs.rust }} runs-on: ${{ needs.set-image.outputs.RUNNER }} timeout-minutes: 60 diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5b581c45fb85..8b4ca48150b1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,7 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" @@ -39,7 +39,7 @@ default: - runner_system_failure - unknown_failure - api_failure - cache: {} + cache: { } interruptible: true .collect-artifacts: @@ -68,8 +68,8 @@ default: .common-before-script: before_script: - - !reference [.job-switcher, before_script] - - !reference [.pipeline-stopper-vars, script] + - !reference [ .job-switcher, before_script ] + - !reference [ .pipeline-stopper-vars, script ] .job-switcher: before_script: @@ -78,8 +78,8 @@ default: .kubernetes-env: image: "${CI_IMAGE}" before_script: - - !reference [.common-before-script, before_script] - - !reference [.prepare-env, before_script] + - !reference [ .common-before-script, before_script ] + - !reference [ .prepare-env, before_script ] tags: - kubernetes-parity-build @@ -107,12 +107,12 @@ default: .docker-env: image: "${CI_IMAGE}" variables: - FL_FORKLIFT_VERSION: !reference [.forklift, variables, FL_FORKLIFT_VERSION] + FL_FORKLIFT_VERSION: !reference [ .forklift, variables, FL_FORKLIFT_VERSION ] before_script: - - !reference [.common-before-script, before_script] - - !reference [.prepare-env, before_script] - - !reference [.rust-info-script, script] - - !reference [.forklift-cache, before_script] + - !reference [ .common-before-script, before_script ] + - !reference [ .prepare-env, before_script ] + - !reference [ .rust-info-script, script ] + - !reference [ .forklift-cache, before_script ] tags: - linux-docker @@ -269,56 +269,6 @@ remove-cancel-pipeline-message: trigger: project: "parity/infrastructure/ci_cd/pipeline-stopper" -cancel-pipeline-cargo-check-benches1: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-benches 1/2" - -cancel-pipeline-cargo-check-benches2: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-benches 2/2" - -cancel-pipeline-cargo-check-each-crate-1: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-each-crate 1/6" - -cancel-pipeline-cargo-check-each-crate-2: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-each-crate 2/6" - -cancel-pipeline-cargo-check-each-crate-3: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-each-crate 3/6" - -cancel-pipeline-cargo-check-each-crate-4: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-each-crate 4/6" - 
-cancel-pipeline-cargo-check-each-crate-5: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-each-crate 5/6" - -cancel-pipeline-cargo-check-each-crate-6: - extends: .cancel-pipeline-template - needs: - - job: "cargo-check-each-crate 6/6" - -cancel-pipeline-cargo-check-each-crate-macos: - extends: .cancel-pipeline-template - needs: - - job: cargo-check-each-crate-macos - -cancel-pipeline-check-tracing: - extends: .cancel-pipeline-template - needs: - - job: check-tracing - cancel-pipeline-build-linux-stable: extends: .cancel-pipeline-template needs: @@ -334,21 +284,6 @@ cancel-pipeline-build-linux-substrate: needs: - job: build-linux-substrate -cancel-pipeline-test-node-metrics: - extends: .cancel-pipeline-template - needs: - - job: test-node-metrics - -cancel-pipeline-test-frame-ui: - extends: .cancel-pipeline-template - needs: - - job: test-frame-ui - -cancel-pipeline-test-frame-examples-compile-to-wasm: - extends: .cancel-pipeline-template - needs: - - job: test-frame-examples-compile-to-wasm - cancel-pipeline-build-short-benchmark: extends: .cancel-pipeline-template needs: diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index ca3a2394fb39..00a0aa2c9771 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -137,159 +137,6 @@ test-rustdoc: script: - time cargo doc --workspace --all-features --no-deps -test-node-metrics: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts-short - variables: - RUST_TOOLCHAIN: stable - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - script: - # Build the required workers. - - cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker --profile testnet --verbose --locked - - mkdir -p artifacts - - time cargo test --profile testnet - --locked - --features=runtime-metrics -p polkadot-node-metrics > artifacts/log.txt - -test-deterministic-wasm: - stage: test - extends: - - .docker-env - - .common-refs - # DAG - needs: - - job: test-frame-ui - artifacts: false - script: - # build runtime - - WASM_BUILD_NO_COLOR=1 cargo build -q --locked --release -p westend-runtime -p rococo-runtime - # make checksum - - sha256sum target/release/wbuild/*-runtime/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256 - - cargo clean - # build again - - WASM_BUILD_NO_COLOR=1 cargo build -q --locked --release -p westend-runtime -p rococo-runtime - # confirm checksum - - sha256sum -c checksum.sha256 - -cargo-check-benches: - stage: test - artifacts: - expire_in: 10 days - variables: - CI_JOB_NAME: "cargo-check-benches" - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - - .pipeline-stopper-artifacts - before_script: - # TODO: DON'T FORGET TO CHANGE FOR PROD VALUES!!! - # merges in the master branch on PRs. 
skip if base is not master - - 'if [ $CI_COMMIT_REF_NAME != "master" ]; then - BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech-stg/polkadot-sdk/pulls/${CI_COMMIT_REF_NAME} | jq -r .base.ref); - printf "Merging base branch %s\n" "${BASE:=master}"; - if [ $BASE != "master" ]; then - echo "$BASE is not master, skipping merge"; - else - git config user.email "ci@gitlab.parity.io"; - git fetch origin "refs/heads/${BASE}"; - git merge --verbose --no-edit FETCH_HEAD; - fi - fi' - - !reference [.forklift-cache, before_script] - parallel: 2 - script: - - mkdir -p ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA - # this job is executed in parallel on two runners - - echo "___Running benchmarks___"; - - case ${CI_NODE_INDEX} in - 1) - SKIP_WASM_BUILD=1 time cargo check --locked --benches --all; - cargo run --locked --release -p node-bench -- ::trie::read::small --json - | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; - echo "___Cache could be uploaded___"; - ;; - 2) - cargo run --locked --release -p node-bench -- ::node::import::sr25519::transfer_keep_alive::paritydb::small --json - | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::sr25519::transfer_keep_alive::paritydb::small.json - ;; - esac - -node-bench-regression-guard: - # it's not belong to `build` semantically, but dag jobs can't depend on each other - # within the single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632 - # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402 - stage: build - extends: - - .docker-env - - .common-refs - needs: - # this is a DAG - - job: cargo-check-benches - artifacts: true - # polls artifact from master to compare with current result - # need to specify both parallel jobs from master because of the bug - # https://gitlab.com/gitlab-org/gitlab/-/issues/39063 - - project: $CI_PROJECT_PATH - job: "cargo-check-benches 1/2" - ref: master - artifacts: true - - project: $CI_PROJECT_PATH - job: "cargo-check-benches 2/2" - ref: master - artifacts: true - variables: - CI_IMAGE: "paritytech/node-bench-regression-guard:latest" - before_script: [""] - script: - - if [ $(ls -la artifacts/benches/ | grep master | wc -l) == 0 ]; then - echo "Couldn't find master artifacts"; - exit 1; - fi - - echo "------- IMPORTANT -------" - - echo "node-bench-regression-guard depends on the results of a cargo-check-benches job" - - echo "In case of this job failure, check your pipeline's cargo-check-benches" - - "node-bench-regression-guard --reference artifacts/benches/master-* - --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA" - after_script: [""] - -# if this fails run `bot update-ui` in the Pull Request or "./scripts/update-ui-tests.sh" locally -# see ./docs/contributor/CONTRIBUTING.md#ui-tests -test-frame-ui: - stage: test - extends: - - .docker-env - - .common-refs - # DAG - needs: - - job: test-frame-examples-compile-to-wasm - artifacts: false - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-C debug-assertions -D warnings" - RUST_BACKTRACE: 1 - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" - # Ensure we run the UI tests. 
- RUN_UI_TESTS: 1 - script: - - time cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental - - time cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental - - time cargo test --locked -q --profile testnet -p xcm-procedural - - time cargo test --locked -q --profile testnet -p frame-election-provider-solution-type - - time cargo test --locked -q --profile testnet -p sp-api-test - # There is multiple version of sp-runtime-interface in the repo. So we point to the manifest. - - time cargo test --locked -q --profile testnet --manifest-path substrate/primitives/runtime-interface/Cargo.toml - - cat /cargo_target_dir/debug/.fingerprint/memory_units-759eddf317490d2b/lib-memory_units.json || true - quick-benchmarks-omni: stage: test extends: @@ -306,144 +153,3 @@ quick-benchmarks-omni: script: - time cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks - time cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet - -test-frame-examples-compile-to-wasm: - # into one job - stage: test - extends: - - .docker-env - - .common-refs - # DAG - needs: - - job: test-full-crypto-feature - artifacts: false - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-C debug-assertions" - RUST_BACKTRACE: 1 - script: - - cd ./substrate/frame/examples/offchain-worker/ - - cargo build --locked --target=wasm32-unknown-unknown --no-default-features - - cd ../basic - - cargo build --locked --target=wasm32-unknown-unknown --no-default-features - # FIXME - allow_failure: true - -# more information about this job can be found here: -# https://github.com/paritytech/substrate/pull/6916 -check-tracing: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - - .pipeline-stopper-artifacts - script: - # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features - - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features --features=with-tracing - -# Check that `westend-runtime` compiles with the `metadata-hash` feature enabled. -check-metadata-hash: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - - .pipeline-stopper-artifacts - script: - - time cargo build --locked -p westend-runtime --features metadata-hash - -# more information about this job can be found here: -# https://github.com/paritytech/substrate/pull/3778 -test-full-crypto-feature: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. 
- RUSTFLAGS: "-C debug-assertions" - RUST_BACKTRACE: 1 - script: - - cd substrate/primitives/core/ - - time cargo build --locked --no-default-features --features full_crypto - - cd ../application-crypto - - time cargo build --locked --no-default-features --features full_crypto - -cargo-check-each-crate: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - # - .collect-artifacts - variables: - RUSTFLAGS: "-D warnings" - # $CI_JOB_NAME is set manually so that cache could be shared for all jobs - # "cargo-check-each-crate I/N" jobs - CI_JOB_NAME: cargo-check-each-crate - timeout: 2h - script: - - PYTHONUNBUFFERED=x time .gitlab/check-each-crate.py "$CI_NODE_INDEX" "$CI_NODE_TOTAL" - parallel: 6 - -cargo-check-each-crate-macos: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - # - .collect-artifacts - before_script: - # skip timestamp script, the osx bash doesn't support printf %()T - - !reference [.job-switcher, before_script] - - !reference [.rust-info-script, script] - - !reference [.pipeline-stopper-vars, script] - variables: - SKIP_WASM_BUILD: 1 - script: - # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available - # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 - - time cargo check --workspace --locked - timeout: 2h - tags: - - osx - -cargo-hfuzz: - stage: test - extends: - - .docker-env - - .common-refs - # DAG - needs: - - job: check-tracing - artifacts: false - variables: - # max 10s per iteration, 60s per file - HFUZZ_RUN_ARGS: > - --exit_upon_crash - --exit_code_upon_crash 1 - --timeout 10 - --run_time 60 - # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: - # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian - # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr - # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR - HFUZZ_BUILD_ARGS: > - --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" - --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" - artifacts: - name: "hfuzz-$CI_COMMIT_SHORT_SHA" - expire_in: 7 days - when: on_failure - paths: - - substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/ - script: - - cd ./substrate/primitives/arithmetic/fuzzer - - cargo hfuzz build - - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); do - cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; done From b9b34fb983dac58ae05b0e1379e20363f6f7c88e Mon Sep 17 00:00:00 2001 From: Evgeny Snitko Date: Thu, 5 Sep 2024 19:27:18 +0400 Subject: [PATCH 02/66] Github actions coverage (#5148) Jobs for coverage collections and upload to codecov for github PR's Uses same test suit as test-linux-stable, splits tests into 5 parallel jobs, uploads to codecov, generates report comment and status checks (can be made required) | | | | --- | --- | | image | image | Codecov behavior (required coverage, thresholds, comment info etc.) is highly customizable via `.github/codecov.yaml` ([reference](https://docs.codecov.com/docs/codecovyml-reference)) Unfortunately, some tests are excluded because with `-C instrument-coverage` they run very slowly, flaky, or fail (see [nextest filter expression](https://github.com/paritytech/polkadot-sdk/pull/5148/files#diff-b19504a9520a2498d03020108344d8e6d93d254d812bfa26247faaa7f55263d6R80) of test-linux-stable-coverage). 
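The split relies on nextest's `--partition count:I/N`. As a rough model (an assumption based on nextest's documented count-partitioning, not code from this PR), partitioning deals tests out round-robin over their deterministic ordering:

```python
# Rough model of `--partition count:index/total` (assumed round-robin scheme).
def partition(tests, index, total):
    # index is 1-based, matching the CLI syntax count:index/total
    return [t for pos, t in enumerate(tests) if pos % total == index - 1]

suite = [f"test_{i}" for i in range(12)]
shards = [partition(suite, i, 5) for i in range(1, 6)]
# The union of the shards covers the suite exactly once.
assert sorted(sum(shards, [])) == sorted(suite)
```

Each shard then writes its own profraw/lcov artifacts, and codecov merges the five uploaded reports into a single view.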
So for now, this workflow is optional, and will only run for pr's with the `GHA-coverage` label --- .github/codecov.yml | 8 +- .../workflows/tests-linux-stable-coverage.yml | 143 ++++++++++++++++++ substrate/bin/node/runtime/src/lib.rs | 7 +- substrate/frame/babe/src/mock.rs | 6 +- substrate/frame/grandpa/src/mock.rs | 6 +- 5 files changed, 159 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/tests-linux-stable-coverage.yml diff --git a/.github/codecov.yml b/.github/codecov.yml index ceceb9e63654..b237c9fe6b04 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -6,4 +6,10 @@ coverage: project: default: target: 1.0 - threshold: 2.0 \ No newline at end of file + threshold: 2.0 + +comment: + behavior: new + +fixes: + - "/__w/polkadot-sdk/polkadot-sdk/::" \ No newline at end of file diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml new file mode 100644 index 000000000000..ddf0642a4043 --- /dev/null +++ b/.github/workflows/tests-linux-stable-coverage.yml @@ -0,0 +1,143 @@ +# GHA for test-linux-stable-int, test-linux-stable, test-linux-stable-oldkernel +name: tests linux stable coverage + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review, labeled] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + + set-image: + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. + if: contains(github.event.label.name, 'GHA-coverage') || contains(github.event.pull_request.labels.*.name, 'GHA-coverage') + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + RUNNER: ${{ steps.set_runner.outputs.RUNNER }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + - id: set_runner + run: | + # Run merge queues on persistent runners + if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then + echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT + else + echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT + fi + + # + # + # + test-linux-stable-coverage: + needs: [set-image] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + timeout-minutes: 120 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. 
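+      # In LLVM_PROFILE_FILE below, %p expands to the process id and %m to a
+      # per-binary signature, so concurrently running test binaries each write
+      # their own distinct .profraw file.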
+ # + # -Cinstrument-coverage slows everything down but it is necessary for code coverage + # https://doc.rust-lang.org/rustc/instrument-coverage.html + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings -Cinstrument-coverage" + LLVM_PROFILE_FILE: "/__w/polkadot-sdk/polkadot-sdk/target/coverage/cargo-test-${{ matrix.ci_node_index }}-%p-%m.profraw" + strategy: + fail-fast: false + matrix: + ci_node_index: [1, 2, 3, 4, 5] + ci_node_total: [5] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - run: rustup component add llvm-tools-preview + - run: cargo install cargo-llvm-cov + + - run: mkdir -p target/coverage + + # Some tests are excluded because they run very slowly or fail with -Cinstrument-coverage + - name: run tests + run: > + time cargo llvm-cov nextest + --no-report --release + --workspace + --locked --no-fail-fast + --features try-runtime,ci-only-tests,experimental,riscv + --filter-expr " + !test(/.*benchmark.*/) + - test(/recovers_from_only_chunks_if_pov_large::case_1/) + - test(/participation_requests_reprioritized_for_newly_included/) + - test(/availability_is_recovered_from_chunks_if_no_group_provided::case_1/) + - test(/rejects_missing_inherent_digest/) + - test(/availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunks_only::case_1/) + - test(/availability_is_recovered_from_chunks_if_no_group_provided::case_2/) + - test(/all_security_features_work/) + - test(/nonexistent_cache_dir/) + - test(/recovers_from_only_chunks_if_pov_large::case_3/) + - test(/recovers_from_only_chunks_if_pov_large::case_2/) + - test(/authoring_blocks/) + - test(/rejects_missing_seals/) + - test(/generate_chain_spec/) + - test(/get_preset/) + - test(/list_presets/) + - test(/tests::receive_rate_limit_is_enforced/) + - test(/polkadot-availability-recovery/) + " + --partition count:${{ matrix.ci_node_index }}/${{ matrix.ci_node_total }} + + - name: generate report + run: cargo llvm-cov report --release --codecov --output-path coverage-${{ matrix.ci_node_index }}.lcov + - name: upload report + uses: actions/upload-artifact@v4 + with: + name: coverage-report-${{ matrix.ci_node_index }}.lcov + path: coverage-${{ matrix.ci_node_index }}.lcov + + # + # + # Upload to codecov + upload-reports: + needs: [test-linux-stable-coverage] + runs-on: ubuntu-latest + steps: + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + path: reports + pattern: coverage-report-* + merge-multiple: true + - run: ls -al reports/ + - name: Upload to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + verbose: true + directory: reports + root_dir: /__w/polkadot-sdk/polkadot-sdk/ + + # + # + # + remove-label: + runs-on: ubuntu-latest + needs: [upload-reports] + if: github.event_name == 'pull_request' + steps: + - uses: actions/checkout@v2 + - uses: actions-ecosystem/action-remove-labels@v1 + with: + labels: GHA-coverage \ No newline at end of file diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 001b2273c9b2..6ae04902aa82 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -507,8 +507,7 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; type MaxNominators = MaxNominators; - type KeyOwnerProof = - >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_babe::EquivocationReportSystem; } @@ -1534,7 +1533,7 @@ impl pallet_grandpa::Config for Runtime { type MaxAuthorities = 
MaxAuthorities; type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_grandpa::EquivocationReportSystem; } @@ -2614,7 +2613,7 @@ impl pallet_beefy::Config for Runtime { type OnNewValidatorSet = MmrLeaf; type AncestryHelper = MmrLeaf; type WeightInfo = (); - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_beefy::EquivocationReportSystem; } diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 912cb3e27cd5..4e4052b2b566 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -25,12 +25,12 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize}, + traits::{ConstU128, ConstU32, ConstU64, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ - crypto::{KeyTypeId, Pair, VrfSecret}, + crypto::{Pair, VrfSecret}, U256, }; use sp_io; @@ -182,7 +182,7 @@ impl Config for Test { type WeightInfo = (); type MaxAuthorities = ConstU32<10>; type MaxNominators = ConstU32<100>; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = super::EquivocationReportSystem; } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index ae230a0209a7..caac4107cfb7 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -28,11 +28,11 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, + traits::{ConstU128, ConstU32, ConstU64, OnFinalize, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; -use sp_core::{crypto::KeyTypeId, H256}; +use sp_core::H256; use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, @@ -186,7 +186,7 @@ impl Config for Test { type MaxAuthorities = ConstU32<100>; type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = super::EquivocationReportSystem; } From 702a15cbaa032899f2321fda892faf723d32efca Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Thu, 5 Sep 2024 17:02:24 +0100 Subject: [PATCH 03/66] minor fixes pipeline (#5607) - [return macos jobs to gitlab](https://github.com/paritytech/polkadot-sdk/commit/dcd44b1d8bb681b66cbc0a063a6a999bd8253cdc) - [add benches to merge queue](https://github.com/paritytech/polkadot-sdk/commit/494eb21bb9ac4633f3217e6b58ba7256aea6e38a) - [require test-deterministic-wasm and run it earlier](https://github.com/paritytech/polkadot-sdk/commit/ab9ae5ca6c5128e002cc745d608e542138633250) --- .github/workflows/tests-misc.yml | 69 +++++++++++++------------------- .gitlab-ci.yml | 5 +++ .gitlab/pipeline/test.yml | 22 ++++++++++ 3 files changed, 54 insertions(+), 42 deletions(-) diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 824e8c11c2a4..2e78f4a34ede 
100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -121,7 +121,7 @@ jobs: test-deterministic-wasm: timeout-minutes: 20 - needs: [ set-image, test-frame-ui ] + needs: [ set-image ] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -144,7 +144,7 @@ jobs: cargo-check-benches-branches: needs: [ set-image ] - if: ${{ github.event_name == 'pull_request' }} + if: ${{ github.event_name == 'pull_request' || github.event_name == 'merge_group' }} timeout-minutes: 60 outputs: branch: ${{ steps.branch.outputs.branch }} @@ -354,46 +354,30 @@ jobs: cp .forklift/config.toml /github/home/.forklift/config.toml PYTHONUNBUFFERED=x .github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} - # TODO: enable when we have a macos Self-Hosted runners - # cargo-check-each-crate-macos: - # timeout-minutes: 120 - # needs: [ set-image ] - # runs-on: macos-latest - # env: - # RUSTFLAGS: "-D warnings" - # CI_JOB_NAME: cargo-check-each-crate - # IMAGE: ${{ needs.set-image.outputs.IMAGE }} - # strategy: - # fail-fast: false - # matrix: - # index: [ 1,2,3,4,5,6,7,8,9,10 ] # 10 parallel jobs - # steps: - # - name: Checkout - # uses: actions/checkout@v4.1.7 - # - # - run: | - # VERSION=$(echo $IMAGE | sed -E 's/.*:bullseye-([^-]+)-.*/\1/') - # echo $VERSION - # echo "VERSION=$VERSION" >> $GITHUB_ENV - # - # - run: | - # rustup install $VERSION - # rustup default $VERSION - # - # - name: Check Rust - # run: | - # rustup show - # rustup +nightly show - # - # - name: MacOS Deps - # run: | - # brew install protobuf openssl pkg-config zlib xz zstd llvm jq curl gcc make cmake - # rustup target add wasm32-unknown-unknown --toolchain $VERSION - # rustup component add rust-src rustfmt clippy --toolchain $VERSION - # - # - name: script - # run: | - # PYTHONUNBUFFERED=x .github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} True + # cargo-check-each-crate-macos: + # timeout-minutes: 120 + # needs: [ set-image ] + # runs-on: macOS + # env: + # RUSTFLAGS: "-D warnings" + # CI_JOB_NAME: cargo-check-each-crate + # IMAGE: ${{ needs.set-image.outputs.IMAGE }} + # strategy: + # fail-fast: false + # matrix: + # index: [ 1,2,3,4,5,6,7,8,9,10 ] # 10 parallel jobs + # steps: + # - name: Checkout + # uses: actions/checkout@v4.1.7 + + # - name: Install dependencies + # uses: ./.github/actions/set-up-mac + # with: + # IMAGE: ${{ needs.set-image.outputs.IMAGE }} + + # - name: script + # run: | + # PYTHONUNBUFFERED=x .github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} True confirm-required-test-misc-jobs-passed: runs-on: ubuntu-latest @@ -408,6 +392,7 @@ jobs: - test-node-metrics - check-tracing - cargo-check-each-crate + - test-deterministic-wasm # - cargo-hfuzz remove from required for now, as it's flaky steps: - run: echo '### Good job! 
All the required tests passed 🚀' >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8b4ca48150b1..43123cdbfc41 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -288,3 +288,8 @@ cancel-pipeline-build-short-benchmark: extends: .cancel-pipeline-template needs: - job: build-short-benchmark + +cancel-pipeline-cargo-check-each-crate-macos: + extends: .cancel-pipeline-template + needs: + - job: cargo-check-each-crate-macos \ No newline at end of file diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 00a0aa2c9771..0879870ae13c 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -153,3 +153,25 @@ quick-benchmarks-omni: script: - time cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks - time cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet + +cargo-check-each-crate-macos: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + # - .collect-artifacts + before_script: + # skip timestamp script, the osx bash doesn't support printf %()T + - !reference [.job-switcher, before_script] + - !reference [.rust-info-script, script] + - !reference [.pipeline-stopper-vars, script] + variables: + SKIP_WASM_BUILD: 1 + script: + # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available + # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 + - time cargo check --workspace --locked + timeout: 2h + tags: + - osx From 8d81f1e648a21d7d14f94bc86503d3c77ead5807 Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Thu, 5 Sep 2024 17:07:14 +0100 Subject: [PATCH 04/66] /cmd followups (#5533) Closes: https://github.com/paritytech/polkadot-sdk/issues/5545 - add missing template for frame & xcm benchmarks - fix `git pull` -> https://github.com/paritytech/polkadot-sdk/actions/runs/10644887539/job/29510118915 - respect runtimes headers - use GNU instead of apache for runtimes - adds tests for cmd.py Tip: review this one with Whitespace hidden ![image](https://github.com/user-attachments/assets/3bcdc6c2-7371-428f-9962-556ca81c1467) --------- Co-authored-by: GitHub Action --- .github/scripts/cmd/cmd.py | 275 +++++++++++--------- .github/scripts/cmd/test_cmd.py | 321 ++++++++++++++++++++++++ .github/workflows/cmd-tests.yml | 14 ++ .github/workflows/cmd.yml | 2 +- .github/workflows/runtimes-matrix.json | 29 +++ substrate/frame/balances/src/weights.rs | 138 +++++----- 6 files changed, 584 insertions(+), 195 deletions(-) create mode 100644 .github/scripts/cmd/test_cmd.py create mode 100644 .github/workflows/cmd-tests.yml diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py index 63bd6a2795aa..1c08b621467d 100755 --- a/.github/scripts/cmd/cmd.py +++ b/.github/scripts/cmd/cmd.py @@ -11,6 +11,8 @@ f = open('.github/workflows/runtimes-matrix.json', 'r') runtimesMatrix = json.load(f) +print(f'runtimesMatrix: {runtimesMatrix}\n') + runtimeNames = list(map(lambda x: x['name'], runtimesMatrix)) common_args = { @@ -67,130 +69,153 @@ for arg, config in common_args.items(): parser_ui.add_argument(arg, **config) +def main(): + global args, unknown, runtimesMatrix + args, unknown = parser.parse_known_args() + + print(f'args: {args}') + + if args.command == 'bench': + runtime_pallets_map = {} + failed_benchmarks = {} + 
successful_benchmarks = {} + + profile = "release" + + print(f'Provided runtimes: {args.runtime}') + # convert to mapped dict + runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) + runtimesMatrix = {x['name']: x for x in runtimesMatrix} + print(f'Filtered out runtimes: {runtimesMatrix}') + + # loop over remaining runtimes to collect available pallets + for runtime in runtimesMatrix.values(): + os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks") + print(f'-- listing pallets for benchmark for {runtime["name"]}') + wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" + output = os.popen( + f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read() + raw_pallets = output.strip().split('\n') + + all_pallets = set() + for pallet in raw_pallets: + if pallet: + all_pallets.add(pallet.split(',')[0].strip()) + + pallets = list(all_pallets) + print(f'Pallets in {runtime["name"]}: {pallets}') + runtime_pallets_map[runtime['name']] = pallets + + print(f'\n') + + # filter out only the specified pallets from collected runtimes/pallets + if args.pallet: + print(f'Pallets: {args.pallet}') + new_pallets_map = {} + # keep only specified pallets if they exist in the runtime + for runtime in runtime_pallets_map: + if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): + new_pallets_map[runtime] = args.pallet + + runtime_pallets_map = new_pallets_map + + print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n') + + if not runtime_pallets_map: + if args.pallet and not args.runtime: + print(f"No pallets {args.pallet} found in any runtime") + elif args.runtime and not args.pallet: + print(f"{args.runtime} runtime does not have any pallets") + elif args.runtime and args.pallet: + print(f"No pallets {args.pallet} found in {args.runtime}") + else: + print('No runtimes found') + sys.exit(1) -args, unknown = parser.parse_known_args() - -print(f'args: {args}') - -if args.command == 'bench': - runtime_pallets_map = {} - failed_benchmarks = {} - successful_benchmarks = {} - - profile = "release" - - print(f'Provided runtimes: {args.runtime}') - # convert to mapped dict - runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) - runtimesMatrix = {x['name']: x for x in runtimesMatrix} - print(f'Filtered out runtimes: {runtimesMatrix}') - - # loop over remaining runtimes to collect available pallets - for runtime in runtimesMatrix.values(): - os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks") - print(f'-- listing pallets for benchmark for {runtime["name"]}') - wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" - output = os.popen( - f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read() - raw_pallets = output.strip().split('\n') - - all_pallets = set() - for pallet in raw_pallets: - if pallet: - all_pallets.add(pallet.split(',')[0].strip()) - - pallets = list(all_pallets) - print(f'Pallets in {runtime}: {pallets}') - runtime_pallets_map[runtime['name']] = pallets - - # filter out only the specified pallets from collected runtimes/pallets - if args.pallet: - print(f'Pallet: {args.pallet}') - new_pallets_map = {} - # keep only specified 
pallets if they exist in the runtime for runtime in runtime_pallets_map: - if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): - new_pallets_map[runtime] = args.pallet - - runtime_pallets_map = new_pallets_map - - print(f'Filtered out runtimes & pallets: {runtime_pallets_map}') - - if not runtime_pallets_map: - if args.pallet and not args.runtime: - print(f"No pallets {args.pallet} found in any runtime") - elif args.runtime and not args.pallet: - print(f"{args.runtime} runtime does not have any pallets") - elif args.runtime and args.pallet: - print(f"No pallets {args.pallet} found in {args.runtime}") - else: - print('No runtimes found') - sys.exit(1) - - header_path = os.path.abspath('./substrate/HEADER-APACHE2') - - for runtime in runtime_pallets_map: - for pallet in runtime_pallets_map[runtime]: - config = runtimesMatrix[runtime] - print(f'-- config: {config}') - if runtime == 'dev': - # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) - search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'" - print(f'-- running: {search_manifest_path}') - manifest_path = os.popen(search_manifest_path).read() - if not manifest_path: - print(f'-- pallet {pallet} not found in dev runtime') - exit(1) - package_dir = os.path.dirname(manifest_path) - print(f'-- package_dir: {package_dir}') - print(f'-- manifest_path: {manifest_path}') - output_path = os.path.join(package_dir, "src", "weights.rs") - else: - default_path = f"./{config['path']}/src/weights" - xcm_path = f"./{config['path']}/src/weights/xcm" - output_path = default_path if not pallet.startswith("pallet_xcm_benchmarks") else xcm_path - print(f'-- benchmarking {pallet} in {runtime} into {output_path}') - cmd = f"frame-omni-bencher v1 benchmark pallet --extrinsic=* --runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm --pallet={pallet} --header={header_path} --output={output_path} --wasm-execution=compiled --steps=50 --repeat=20 --heap-pages=4096 --no-storage-info --no-min-squares --no-median-slopes" - print(f'-- Running: {cmd}') - status = os.system(cmd) - if status != 0 and not args.continue_on_fail: - print(f'Failed to benchmark {pallet} in {runtime}') - sys.exit(1) - - # Otherwise collect failed benchmarks and print them at the end - # push failed pallets to failed_benchmarks - if status != 0: - failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] - else: - successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] - - if failed_benchmarks: - print('❌ Failed benchmarks of runtimes/pallets:') - for runtime, pallets in failed_benchmarks.items(): - print(f'-- {runtime}: {pallets}') - - if successful_benchmarks: - print('✅ Successful benchmarks of runtimes/pallets:') - for runtime, pallets in successful_benchmarks.items(): - print(f'-- {runtime}: {pallets}') - -elif args.command == 'fmt': - command = f"cargo +nightly fmt" - print(f'Formatting with `{command}`') - nightly_status = os.system(f'{command}') - taplo_status = os.system('taplo format --config .config/taplo.toml') - - if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail: - print('❌ Failed to format code') - sys.exit(1) - -elif args.command == 'update-ui': - command = 'sh ./scripts/update-ui-tests.sh' - print(f'Updating ui with `{command}`') - status = os.system(f'{command}') - - if status != 0 and not 
args.continue_on_fail: - print('❌ Failed to format code') - sys.exit(1) - -print('🚀 Done') + for pallet in runtime_pallets_map[runtime]: + config = runtimesMatrix[runtime] + header_path = os.path.abspath(config['header']) + template = None + + print(f'-- config: {config}') + if runtime == 'dev': + # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) + search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'" + print(f'-- running: {search_manifest_path}') + manifest_path = os.popen(search_manifest_path).read() + if not manifest_path: + print(f'-- pallet {pallet} not found in dev runtime') + exit(1) + package_dir = os.path.dirname(manifest_path) + print(f'-- package_dir: {package_dir}') + print(f'-- manifest_path: {manifest_path}') + output_path = os.path.join(package_dir, "src", "weights.rs") + template = config['template'] + else: + default_path = f"./{config['path']}/src/weights" + xcm_path = f"./{config['path']}/src/weights/xcm" + output_path = default_path + if pallet.startswith("pallet_xcm_benchmarks"): + template = config['template'] + output_path = xcm_path + + print(f'-- benchmarking {pallet} in {runtime} into {output_path}') + cmd = f"frame-omni-bencher v1 benchmark pallet " \ + f"--extrinsic=* " \ + f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm " \ + f"--pallet={pallet} " \ + f"--header={header_path} " \ + f"--output={output_path} " \ + f"--wasm-execution=compiled " \ + f"--steps=50 " \ + f"--repeat=20 " \ + f"--heap-pages=4096 " \ + f"{f'--template={template} ' if template else ''}" \ + f"--no-storage-info --no-min-squares --no-median-slopes" + print(f'-- Running: {cmd} \n') + status = os.system(cmd) + if status != 0 and not args.continue_on_fail: + print(f'Failed to benchmark {pallet} in {runtime}') + sys.exit(1) + + # Otherwise collect failed benchmarks and print them at the end + # push failed pallets to failed_benchmarks + if status != 0: + failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] + else: + successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] + + if failed_benchmarks: + print('❌ Failed benchmarks of runtimes/pallets:') + for runtime, pallets in failed_benchmarks.items(): + print(f'-- {runtime}: {pallets}') + + if successful_benchmarks: + print('✅ Successful benchmarks of runtimes/pallets:') + for runtime, pallets in successful_benchmarks.items(): + print(f'-- {runtime}: {pallets}') + + elif args.command == 'fmt': + command = f"cargo +nightly fmt" + print(f'Formatting with `{command}`') + nightly_status = os.system(f'{command}') + taplo_status = os.system('taplo format --config .config/taplo.toml') + + if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail: + print('❌ Failed to format code') + sys.exit(1) + + elif args.command == 'update-ui': + command = 'sh ./scripts/update-ui-tests.sh' + print(f'Updating ui with `{command}`') + status = os.system(f'{command}') + + if status != 0 and not args.continue_on_fail: + print('❌ Failed to format code') + sys.exit(1) + + print('🚀 Done') + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/.github/scripts/cmd/test_cmd.py b/.github/scripts/cmd/test_cmd.py new file mode 100644 index 000000000000..4cf1b290915d --- /dev/null +++ b/.github/scripts/cmd/test_cmd.py @@ -0,0 +1,321 @@ +import unittest +from unittest.mock import 
patch, mock_open, MagicMock, call +import json +import sys +import os +import argparse + +# Mock data for runtimes-matrix.json +mock_runtimes_matrix = [ + {"name": "dev", "package": "kitchensink-runtime", "path": "substrate/frame", "header": "substrate/HEADER-APACHE2", "template": "substrate/.maintain/frame-weight-template.hbs"}, + {"name": "westend", "package": "westend-runtime", "path": "polkadot/runtime/westend", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"}, + {"name": "rococo", "package": "rococo-runtime", "path": "polkadot/runtime/rococo", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"}, + {"name": "asset-hub-westend", "package": "asset-hub-westend-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs"}, +] + +def get_mock_bench_output(runtime, pallets, output_path, header, template = None): + return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \ + f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ + f"--pallet={pallets} --header={header} " \ + f"--output={output_path} " \ + f"--wasm-execution=compiled " \ + f"--steps=50 --repeat=20 --heap-pages=4096 " \ + f"{f'--template={template} ' if template else ''}" \ + f"--no-storage-info --no-min-squares --no-median-slopes" + +class TestCmd(unittest.TestCase): + + def setUp(self): + self.patcher1 = patch('builtins.open', new_callable=mock_open, read_data=json.dumps(mock_runtimes_matrix)) + self.patcher2 = patch('json.load', return_value=mock_runtimes_matrix) + self.patcher3 = patch('argparse.ArgumentParser.parse_known_args') + self.patcher4 = patch('os.system', return_value=0) + self.patcher5 = patch('os.popen') + + self.mock_open = self.patcher1.start() + self.mock_json_load = self.patcher2.start() + self.mock_parse_args = self.patcher3.start() + self.mock_system = self.patcher4.start() + self.mock_popen = self.patcher5.start() + + # Ensure that cmd.py uses the mock_runtimes_matrix + import cmd + cmd.runtimesMatrix = mock_runtimes_matrix + + def tearDown(self): + self.patcher1.stop() + self.patcher2.stop() + self.patcher3.stop() + self.patcher4.stop() + self.patcher5.stop() + + def test_bench_command_normal_execution_all_runtimes(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)), + pallet=['pallet_balances'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_staking\npallet_something\n", # Output for dev runtime + "pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime + "pallet_staking\npallet_something\n", # Output for rococo runtime - no pallet here + "pallet_balances\npallet_staking\npallet_something\n", # Output for asset-hub-westend runtime + "./substrate/frame/balances/Cargo.toml\n", # Mock manifest path for dev -> pallet_balances + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile release 
--features runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), + + call(get_mock_bench_output('kitchensink', 'pallet_balances', './substrate/frame/balances/src/weights.rs', os.path.abspath('substrate/HEADER-APACHE2'), "substrate/.maintain/frame-weight-template.hbs")), + call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', os.path.abspath('polkadot/file_header.txt'))), + # skips rococo benchmark + call(get_mock_bench_output('asset-hub-westend', 'pallet_balances', './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', os.path.abspath('cumulus/file_header.txt'))), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_normal_execution(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['westend'], + pallet=['pallet_balances', 'pallet_staking'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + header_path = os.path.abspath('polkadot/file_header.txt') + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + + # Westend runtime calls + call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)), + call(get_mock_bench_output('westend', 'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + + def test_bench_command_normal_execution_xcm(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['westend'], + pallet=['pallet_xcm_benchmarks::generic'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + header_path = os.path.abspath('polkadot/file_header.txt') + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_staking\npallet_something\npallet_xcm_benchmarks::generic\n", # Output for westend runtime + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + + # Westend runtime calls + call(get_mock_bench_output( + 'westend', + 'pallet_xcm_benchmarks::generic', + './polkadot/runtime/westend/src/weights/xcm', + header_path, + "polkadot/xcm/pallet-xcm-benchmarks/template.hbs" + )), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_two_runtimes_two_pallets(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['westend', 'rococo'], + pallet=['pallet_balances', 'pallet_staking'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + self.mock_popen.return_value.read.side_effect = [ + "pallet_staking\npallet_balances\n", # Output for westend runtime + "pallet_staking\npallet_balances\n", # Output for rococo runtime + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + header_path = os.path.abspath('polkadot/file_header.txt') + + expected_calls = [ + # 
Build calls + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile release --features runtime-benchmarks"), + # Westend runtime calls + call(get_mock_bench_output('westend', 'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)), + call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)), + # Rococo runtime calls + call(get_mock_bench_output('rococo', 'pallet_staking', './polkadot/runtime/rococo/src/weights', header_path)), + call(get_mock_bench_output('rococo', 'pallet_balances', './polkadot/runtime/rococo/src/weights', header_path)), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_one_dev_runtime(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['dev'], + pallet=['pallet_balances'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + manifest_dir = "substrate/frame/kitchensink" + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_something", # Output for dev runtime + manifest_dir + "/Cargo.toml" # Output for manifest path in dev runtime + ] + header_path = os.path.abspath('substrate/HEADER-APACHE2') + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"), + # Dev (kitchensink) runtime calls + call(get_mock_bench_output( + 'kitchensink', + 'pallet_balances', + manifest_dir + "/src/weights.rs", + header_path, + "substrate/.maintain/frame-weight-template.hbs" + )), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_one_cumulus_runtime(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['asset-hub-westend'], + pallet=['pallet_assets'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + self.mock_popen.return_value.read.side_effect = [ + "pallet_assets\n", # Output for asset-hub-westend runtime + ] + header_path = os.path.abspath('cumulus/file_header.txt') + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p asset-hub-westend-runtime --profile
release --features runtime-benchmarks"), + # Asset-hub-westend runtime calls + call(get_mock_bench_output( + 'asset-hub-westend', + 'pallet_xcm_benchmarks::generic', + './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm', + header_path, + "cumulus/templates/xcm-bench-template.hbs" + )), + call(get_mock_bench_output( + 'asset-hub-westend', + 'pallet_assets', + './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', + header_path + )), + ] + + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt', continue_on_fail=False), [])) + @patch('os.system', return_value=0) + def test_fmt_command(self, mock_system, mock_parse_args): + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + mock_system.assert_any_call('cargo +nightly fmt') + mock_system.assert_any_call('taplo format --config .config/taplo.toml') + + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui', continue_on_fail=False), [])) + @patch('os.system', return_value=0) + def test_update_ui_command(self, mock_system, mock_parse_args): + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh') + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/.github/workflows/cmd-tests.yml b/.github/workflows/cmd-tests.yml new file mode 100644 index 000000000000..87d7ee1dcc2d --- /dev/null +++ b/.github/workflows/cmd-tests.yml @@ -0,0 +1,14 @@ +name: Command Bot Tests + +on: + pull_request: + +permissions: + contents: read + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: python3 .github/scripts/cmd/test_cmd.py \ No newline at end of file diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index dfdf771a6105..79a4f6c3b19c 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -358,7 +358,7 @@ jobs: git config --local user.email "action@github.com" git config --local user.name "GitHub Action" - git pull origin ${{ needs.get-pr-branch.outputs.pr-branch }} + git pull --rebase origin ${{ needs.get-pr-branch.outputs.pr-branch }} git add . 
git restore --staged Cargo.lock # ignore changes in Cargo.lock git commit -m "Update from ${{ github.actor }} running command '${{ steps.get-pr-comment.outputs.group2 }}'" || true diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index 45a3acd3f166..102437876daf 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -3,6 +3,8 @@ "name": "dev", "package": "kitchensink-runtime", "path": "substrate/frame", + "header": "substrate/HEADER-APACHE2", + "template": "substrate/.maintain/frame-weight-template.hbs", "uri": null, "is_relay": false }, @@ -10,6 +12,8 @@ "name": "westend", "package": "westend-runtime", "path": "polkadot/runtime/westend", + "header": "polkadot/file_header.txt", + "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", "uri": "wss://try-runtime-westend.polkadot.io:443", "is_relay": true }, @@ -17,6 +21,8 @@ "name": "rococo", "package": "rococo-runtime", "path": "polkadot/runtime/rococo", + "header": "polkadot/file_header.txt", + "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", "uri": "wss://try-runtime-rococo.polkadot.io:443", "is_relay": true }, @@ -24,6 +30,8 @@ "name": "asset-hub-westend", "package": "asset-hub-westend-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-asset-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -31,6 +39,8 @@ "name": "asset-hub-rococo", "package": "asset-hub-rococo-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-asset-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -38,6 +48,8 @@ "name": "bridge-hub-rococo", "package": "bridge-hub-rococo-runtime", "path": "cumulus/parachains/runtimes/bridges/bridge-hub-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -45,6 +57,8 @@ "name": "bridge-hub-westend", "package": "bridge-hub-rococo-runtime", "path": "cumulus/parachains/runtimes/bridges/bridge-hub-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-bridge-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -52,12 +66,16 @@ "name": "collectives-westend", "package": "collectives-westend-runtime", "path": "cumulus/parachains/runtimes/collectives/collectives-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-collectives-rpc.polkadot.io:443" }, { "name": "contracts-rococo", "package": "contracts-rococo-runtime", "path": "cumulus/parachains/runtimes/contracts/contracts-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-contracts-rpc.polkadot.io:443", "is_relay": false }, @@ -65,6 +83,8 @@ "name": "coretime-rococo", "package": "coretime-rococo-runtime", "path": "cumulus/parachains/runtimes/coretime/coretime-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-coretime-rpc.polkadot.io:443", "is_relay": false }, @@ -72,6 +92,8 @@ "name": "coretime-westend", "package": "coretime-westend-runtime", "path": 
"cumulus/parachains/runtimes/coretime/coretime-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-coretime-rpc.polkadot.io:443", "is_relay": false }, @@ -79,12 +101,17 @@ "name": "glutton-westend", "package": "glutton-westend-runtime", "path": "cumulus/parachains/runtimes/gluttons/glutton-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", + "uri": null, "is_relay": false }, { "name": "people-rococo", "package": "people-rococo-runtime", "path": "cumulus/parachains/runtimes/people/people-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-people-rpc.polkadot.io:443", "is_relay": false }, @@ -92,6 +119,8 @@ "name": "people-westend", "package": "people-westend-runtime", "path": "cumulus/parachains/runtimes/people/people-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-people-rpc.polkadot.io:443", "is_relay": false } diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index e82c97160efc..55decef273f6 100644 --- a/substrate/frame/balances/src/weights.rs +++ b/substrate/frame/balances/src/weights.rs @@ -17,27 +17,27 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 42.0.0 +//! DATE: 2024-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `8f4ffe8f7785`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --runtime=target/release/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_balances +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/balances/src/weights.rs // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=dev -// --header=./substrate/HEADER-APACHE2 -// --output=./substrate/frame/balances/src/weights.rs -// --template=./substrate/.maintain/frame-weight-template.hbs +// --template=substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -71,8 +71,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 47_552_000 picoseconds. - Weight::from_parts(48_363_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. 
+ Weight::from_parts(77_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_565_000 picoseconds. - Weight::from_parts(38_159_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. + Weight::from_parts(61_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,10 +91,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(14_687_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. + Weight::from_parts(19_802_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -102,10 +102,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 19_188_000 picoseconds. - Weight::from_parts(19_929_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. + Weight::from_parts(31_293_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -113,10 +113,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 48_903_000 picoseconds. - Weight::from_parts(49_944_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. + Weight::from_parts(78_184_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -126,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. + Weight::from_parts(76_817_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,10 +135,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 16_750_000 picoseconds. - Weight::from_parts(17_233_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. + Weight::from_parts(25_026_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_333_000 picoseconds. 
- Weight::from_parts(16_588_000, 990) - // Standard Error: 12_254 - .saturating_add(Weight::from_parts(13_973_659, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. + Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -161,22 +161,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_265_000 picoseconds. - Weight::from_parts(6_594_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_151_000 picoseconds. - Weight::from_parts(30_968_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_055_000 picoseconds. - Weight::from_parts(20_711_000, 0) + // Minimum execution time: 31_141_000 picoseconds. + Weight::from_parts(31_917_000, 0) } } @@ -188,8 +188,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 47_552_000 picoseconds. - Weight::from_parts(48_363_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. + Weight::from_parts(77_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -199,8 +199,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_565_000 picoseconds. - Weight::from_parts(38_159_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. + Weight::from_parts(61_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -208,10 +208,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(14_687_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. + Weight::from_parts(19_802_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -219,10 +219,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 19_188_000 picoseconds. - Weight::from_parts(19_929_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. 
+ Weight::from_parts(31_293_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -230,10 +230,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 48_903_000 picoseconds. - Weight::from_parts(49_944_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. + Weight::from_parts(78_184_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -243,8 +243,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. + Weight::from_parts(76_817_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -252,10 +252,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 16_750_000 picoseconds. - Weight::from_parts(17_233_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. + Weight::from_parts(25_026_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -266,10 +266,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_333_000 picoseconds. - Weight::from_parts(16_588_000, 990) - // Standard Error: 12_254 - .saturating_add(Weight::from_parts(13_973_659, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. + Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -278,21 +278,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_265_000 picoseconds. - Weight::from_parts(6_594_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_151_000 picoseconds. - Weight::from_parts(30_968_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_055_000 picoseconds. - Weight::from_parts(20_711_000, 0) + // Minimum execution time: 31_141_000 picoseconds. 
+ Weight::from_parts(31_917_000, 0) } } From fdb4554e26ebdd4d729158501a3ddb3c6ebdfb6f Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Fri, 6 Sep 2024 16:21:09 +0800 Subject: [PATCH 05/66] Introduce `BlockGap` (#5592) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, block gaps could only be created by warp sync, but block gaps will also be generated by fast sync once #5406 is fixed. This PR is part 1 of the detailed implementation plan in https://github.com/paritytech/polkadot-sdk/issues/5406#issuecomment-2325064863: refactor `BlockGap`. This refactor converts the existing `(NumberFor<B>, NumberFor<B>)` into a dedicated `BlockGap<NumberFor<B>>` struct. This change is purely structural and does not alter existing logic, but lays the groundwork for the follow-up PR. The compatibility concern caused by the new structure is addressed in the second commit. cc @dmitry-markin --------- Co-authored-by: Bastian Köcher --- prdoc/pr_5592.prdoc | 26 +++++++ substrate/client/consensus/babe/src/lib.rs | 6 +- substrate/client/db/src/lib.rs | 73 ++++++++++++------- substrate/client/db/src/utils.rs | 47 ++++++++++-- .../network/sync/src/strategy/chain_sync.rs | 4 +- substrate/client/service/src/client/client.rs | 5 +- .../primitives/blockchain/src/backend.rs | 40 ++++++++-- 7 files changed, 153 insertions(+), 48 deletions(-) create mode 100644 prdoc/pr_5592.prdoc diff --git a/prdoc/pr_5592.prdoc b/prdoc/pr_5592.prdoc new file mode 100644 index 000000000000..9d51917db7b1 --- /dev/null +++ b/prdoc/pr_5592.prdoc @@ -0,0 +1,26 @@ +title: Introduce `BlockGap` + +doc: + - audience: Node Dev + description: | + This is the first step towards https://github.com/paritytech/polkadot-sdk/issues/5406, + refactoring the representation of block gap. This refactor converts the existing + `(NumberFor<B>, NumberFor<B>)` into a dedicated `BlockGap<NumberFor<B>>` + struct. This change is purely structural and does not alter existing logic, but lays + the groundwork for the follow-up PR. The compatibility concern in the database caused + by the new structure transition is addressed as well. + + The `BlockGap` refactoring results in breaking changes in the `Info` structure returned + in `client.info()`. + +crates: + - name: sc-consensus-babe + bump: none + - name: sc-client-db + bump: none + - name: sc-network-sync + bump: none + - name: sc-service + bump: none + - name: sp-blockchain + bump: major diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 9770b16871e1..4cf66302ec85 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1146,7 +1146,9 @@ where let info = self.client.info(); let number = *block.header.number(); - if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block.with_state() { + if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) || + block.with_state() + { // Verification for imported blocks is skipped in two cases: // 1. When importing blocks below the last finalized block during network initial // synchronization. @@ -1420,7 +1422,7 @@ where // Skip babe logic if block already in chain or importing blocks during initial sync, // otherwise the check for epoch changes will error because trying to re-import an // epoch change or because of missing epoch data in the tree, respectively.
- if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || + if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) || block_status == BlockStatus::InChain { // When re-importing existing block strip away intermediates. diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index eadb26254a18..4559a01e57e3 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -61,6 +61,7 @@ use codec::{Decode, Encode}; use hash_db::Prefix; use sc_client_api::{ backend::NewBlockState, + blockchain::{BlockGap, BlockGapType}, leaves::{FinalizationOutcome, LeafSet}, utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, @@ -91,6 +92,7 @@ use sp_state_machine::{ StorageValue, UsageInfo as StateUsageInfo, }; use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB}; +use utils::BLOCK_GAP_CURRENT_VERSION; // Re-export the Database trait so that one can pass an implementation of it. pub use sc_state_db::PruningMode; @@ -522,7 +524,7 @@ impl BlockchainDb { } } - fn update_block_gap(&self, gap: Option<(NumberFor, NumberFor)>) { + fn update_block_gap(&self, gap: Option>>) { let mut meta = self.meta.write(); meta.block_gap = gap; } @@ -1671,35 +1673,56 @@ impl Backend { ); } - if let Some((mut start, end)) = block_gap { - if number == start { - start += One::one(); - utils::insert_number_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; - if start > end { - transaction.remove(columns::META, meta_keys::BLOCK_GAP); - block_gap = None; - debug!(target: "db", "Removed block gap."); - } else { - block_gap = Some((start, end)); - debug!(target: "db", "Update block gap. {block_gap:?}"); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP, - &(start, end).encode(), - ); - } - block_gap_updated = true; + if let Some(mut gap) = block_gap { + match gap.gap_type { + BlockGapType::MissingHeaderAndBody => + if number == gap.start { + gap.start += One::one(); + utils::insert_number_to_key_mapping( + &mut transaction, + columns::KEY_LOOKUP, + number, + hash, + )?; + if gap.start > gap.end { + transaction.remove(columns::META, meta_keys::BLOCK_GAP); + transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION); + block_gap = None; + debug!(target: "db", "Removed block gap."); + } else { + block_gap = Some(gap); + debug!(target: "db", "Update block gap. {block_gap:?}"); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP, + &gap.encode(), + ); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); + } + block_gap_updated = true; + }, + BlockGapType::MissingBody => { + unreachable!("Unsupported block gap. 
TODO: https://github.com/paritytech/polkadot-sdk/issues/5406") + }, } } else if number > best_num + One::one() && number > One::one() && self.blockchain.header(parent_hash)?.is_none() { - let gap = (best_num + One::one(), number - One::one()); + let gap = BlockGap { + start: best_num + One::one(), + end: number - One::one(), + gap_type: BlockGapType::MissingHeaderAndBody, + }; transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); block_gap = Some(gap); block_gap_updated = true; debug!(target: "db", "Detected block gap {block_gap:?}"); diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index b532e0d46662..0b591c967e60 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -25,10 +25,14 @@ use log::{debug, info}; use crate::{Database, DatabaseSource, DbHash}; use codec::Decode; +use sc_client_api::blockchain::{BlockGap, BlockGapType}; use sp_database::Transaction; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero}, + traits::{ + Block as BlockT, Header as HeaderT, NumberFor, UniqueSaturatedFrom, UniqueSaturatedInto, + Zero, + }, }; use sp_trie::DBValue; @@ -38,6 +42,9 @@ pub const NUM_COLUMNS: u32 = 13; /// Meta column. The set of keys in the column is shared by full && light storages. pub const COLUMN_META: u32 = 0; +/// Current block gap version. +pub const BLOCK_GAP_CURRENT_VERSION: u32 = 1; + /// Keys of entries in COLUMN_META. pub mod meta_keys { /// Type of storage (full or light). @@ -50,6 +57,8 @@ pub mod meta_keys { pub const FINALIZED_STATE: &[u8; 6] = b"fstate"; /// Block gap. pub const BLOCK_GAP: &[u8; 3] = b"gap"; + /// Block gap version. + pub const BLOCK_GAP_VERSION: &[u8; 7] = b"gap_ver"; /// Genesis block hash. pub const GENESIS_HASH: &[u8; 3] = b"gen"; /// Leaves prefix list key. @@ -73,8 +82,8 @@ pub struct Meta { pub genesis_hash: H, /// Finalized state, if any pub finalized_state: Option<(H, N)>, - /// Block gap, start and end inclusive, if any. - pub block_gap: Option<(N, N)>, + /// Block gap, if any. 
+ pub block_gap: Option>, } /// A block lookup key: used for canonical lookup from block number to hash @@ -197,7 +206,7 @@ fn open_database_at( open_kvdb_rocksdb::(path, db_type, create, *cache_size)?, DatabaseSource::Custom { db, require_create_flag } => { if *require_create_flag && !create { - return Err(OpenDbError::DoesNotExist) + return Err(OpenDbError::DoesNotExist); } db.clone() }, @@ -364,7 +373,7 @@ pub fn check_database_type( return Err(OpenDbError::UnexpectedDbType { expected: db_type, found: stored_type.to_owned(), - }) + }); }, None => { let mut transaction = Transaction::new(); @@ -515,9 +524,31 @@ where } else { None }; - let block_gap = db - .get(COLUMN_META, meta_keys::BLOCK_GAP) - .and_then(|d| Decode::decode(&mut d.as_slice()).ok()); + let block_gap = match db + .get(COLUMN_META, meta_keys::BLOCK_GAP_VERSION) + .and_then(|d| u32::decode(&mut d.as_slice()).ok()) + { + None => { + let old_block_gap: Option<(NumberFor, NumberFor)> = db + .get(COLUMN_META, meta_keys::BLOCK_GAP) + .and_then(|d| Decode::decode(&mut d.as_slice()).ok()); + + old_block_gap.map(|(start, end)| BlockGap { + start, + end, + gap_type: BlockGapType::MissingHeaderAndBody, + }) + }, + Some(version) => match version { + BLOCK_GAP_CURRENT_VERSION => db + .get(COLUMN_META, meta_keys::BLOCK_GAP) + .and_then(|d| Decode::decode(&mut d.as_slice()).ok()), + v => + return Err(sp_blockchain::Error::Backend(format!( + "Unsupported block gap DB version: {v}" + ))), + }, + }; debug!(target: "db", "block_gap={:?}", block_gap); Ok(Meta { diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index 21e474048625..f29ed1b083e8 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -44,7 +44,7 @@ use crate::{ use codec::Encode; use log::{debug, error, info, trace, warn}; use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; -use sc_client_api::{BlockBackend, ProofProvider}; +use sc_client_api::{blockchain::BlockGap, BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock, @@ -1381,7 +1381,7 @@ where } } - if let Some((start, end)) = info.block_gap { + if let Some(BlockGap { start, end, .. }) = info.block_gap { debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}"); self.gap_sync = Some(GapSync { best_queued_number: start - One::one(), diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 22defd7c5514..8b699c7faffd 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -604,9 +604,8 @@ where } let info = self.backend.blockchain().info(); - let gap_block = info - .block_gap - .map_or(false, |(start, _)| *import_headers.post().number() == start); + let gap_block = + info.block_gap.map_or(false, |gap| *import_headers.post().number() == gap.start); // the block is lower than our last finalized block so it must revert // finality, refusing import. diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index fd0c5795cbfd..d7386a71a0d1 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -17,6 +17,7 @@ //! 
Substrate blockchain trait +use codec::{Decode, Encode}; use parking_lot::RwLock; use sp_runtime::{ generic::BlockId, @@ -109,7 +110,7 @@ pub trait ForkBackend<Block: BlockT>: for block in tree_route.retracted() { expanded_forks.insert(block.hash); } - continue + continue; }, Err(_) => { // There are cases when blocks are missing (e.g. warp-sync). @@ -196,7 +197,7 @@ pub trait Backend<Block: BlockT>: let info = self.info(); if info.finalized_number > *base_header.number() { // `base_header` is on a dead fork. - return Ok(None) + return Ok(None); } self.leaves()? }; @@ -207,7 +208,7 @@ pub trait Backend<Block: BlockT>: // go backwards through the chain (via parent links) loop { if current_hash == base_hash { - return Ok(Some(leaf_hash)) + return Ok(Some(leaf_hash)); } let current_header = self @@ -216,7 +217,7 @@ pub trait Backend<Block: BlockT>: // stop search in this chain once we go below the target's block number if current_header.number() < base_header.number() { - break + break; } current_hash = *current_header.parent_hash(); @@ -266,7 +267,7 @@ pub trait Backend<Block: BlockT>: // If we have only one leaf there are no forks, and we can return early. if finalized_block_number == Zero::zero() || leaves.len() == 1 { - return Ok(DisplacedLeavesAfterFinalization::default()) + return Ok(DisplacedLeavesAfterFinalization::default()); } // Store hashes of finalized blocks for quick checking later, the last block is the @@ -332,7 +333,7 @@ pub trait Backend<Block: BlockT>: elapsed = ?now.elapsed(), "Added genesis leaf to displaced leaves." ); - continue + continue; } debug!( @@ -539,6 +540,29 @@ impl DisplacedLeavesAfterFinalization { } } +/// Represents the type of block gaps that may result from either warp sync or fast sync. +#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)] +pub enum BlockGapType { + /// Both the header and body are missing, as a result of warp sync. + MissingHeaderAndBody, + /// The block body is missing, as a result of fast sync. + MissingBody, +} + +/// Represents the block gap resulting from warp sync or fast sync. +/// +/// A block gap is a range of blocks where either the bodies, or both headers and bodies are +/// missing. +#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)] +pub struct BlockGap<N> { + /// The starting block number of the gap (inclusive). + pub start: N, + /// The ending block number of the gap (inclusive). + pub end: N, + /// The type of gap. + pub gap_type: BlockGapType, +} + /// Blockchain info #[derive(Debug, Eq, PartialEq, Clone)] pub struct Info<Block: BlockT> { @@ -556,8 +580,8 @@ pub struct Info<Block: BlockT> { pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>, /// Number of concurrent leave forks. pub number_leaves: usize, - /// Missing blocks after warp sync. (start, end). - pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>, + /// Missing blocks after warp sync or fast sync. + pub block_gap: Option<BlockGap<NumberFor<Block>>>, } /// Block status. From 76df1ae460fb2f9910051e0dac2211ab8d156ced Mon Sep 17 00:00:00 2001 From: Egor_P Date: Fri, 6 Sep 2024 10:29:26 +0200 Subject: [PATCH 06/66] [CI/Release] Pipeline to create a stable release branch (#5598) This PR contains a pipeline which is going to branch off the new stable release branch (e.g. `stable2412`, `stable2503`), bump the `polkadot` `NODE_VERSION` and the `spec_version` of the runtimes, and reorganise the `prdocs` related to the new stable release.
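For illustration, the node-version to runtime `spec_version` convention this pipeline automates can be sketched as below (a minimal bash sketch; `to_spec_version` is an illustrative name, while the real helper, `get_spec_version`, is added in `release_lib.sh` further down):

```bash
#!/usr/bin/env bash
# Minimal sketch of the assumed convention: v1.16.0 -> 1_016_000.
# Dots become "_0", then the patch component is replaced by a suffix
# (000 unless SUFFIX is set in the environment).
to_spec_version() {
  local v="${1#v}" suffix="${SUFFIX:-000}"
  v="${v//./_0}"                         # 1.16.0 -> 1_016_00
  printf '%s_%s\n' "${v%_*}" "$suffix"   # 1_016 + 000 -> 1_016_000
}

to_spec_version "v1.16.0"  # prints 1_016_000
```

Deriving the `spec_version` mechanically from the node version keeps runtime and node releases in lockstep on each stable branch.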
This is a first step in the automated `polkadot-sdk` release flow as part of the task: https://github.com/paritytech/polkadot-sdk/issues/3291 The pipeline is not supposed to be triggered in the main `polkadot-sdk` repo, but in the fork in the [`paritytech-release`](https://github.com/paritytech-release/polkadot-sdk) org, where the whole release flow is going to land. Closes: https://github.com/paritytech/release-engineering/issues/222 --- .github/scripts/common/lib.sh | 12 +- .github/scripts/release/release_lib.sh | 118 ++++++++++++++++++ .../workflows/release-branchoff-stable.yml | 105 ++++++++++++++++ 3 files changed, 229 insertions(+), 6 deletions(-) create mode 100644 .github/scripts/release/release_lib.sh create mode 100644 .github/workflows/release-branchoff-stable.yml diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index bfb3120ad9bb..5361db398ae7 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -299,23 +299,23 @@ function check_sha256() { } # Import GPG keys of the release team members -# This is done in parallel as it can take a while sometimes function import_gpg_keys() { - GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"} + GPG_KEYSERVER=${GPG_KEYSERVER:-"hkps://keyserver.ubuntu.com"} SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798" EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" + PARITY_RELEASES="90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE" - echo "Importing GPG keys from $GPG_KEYSERVER in parallel" - for key in $SEC $EGOR $MORGAN; do + echo "Importing GPG keys from $GPG_KEYSERVER" + for key in $SEC $EGOR $MORGAN $PARITY_RELEASES; do ( echo "Importing GPG key $key" gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key $key trust; - ) & + ) done wait - gpg -k $SEC + gpg -k } # Check the GPG signature for a given binary diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh new file mode 100644 index 000000000000..81a3c14edec8 --- /dev/null +++ b/.github/scripts/release/release_lib.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# Set the new version by replacing the value of the constant given as pattern +# in the file. +# +# input: pattern, version, file +# output: none +set_version() { + pattern=$1 + version=$2 + file=$3 + + sed -i "s/$pattern/\1\"${version}\"/g" $file + return 0 +} + +# Commit changes to git with specific message. +# "|| true" does not let the script fail with exit code 1, +# in case there is nothing to commit.
+# +# input: MESSAGE (any message which should be used for the commit) +# output: none +commit_with_message() { + MESSAGE=$1 + git commit -a -m "$MESSAGE" || true +} + +# Return the list of runtimes, filtered +# input: none +# output: list of filtered runtimes +get_filtered_runtimes_list() { + grep_filters=("runtime.*" "test|template|starters|substrate") + + git grep spec_version: | grep .rs: | grep -e "${grep_filters[0]}" | grep "lib.rs" | grep -vE "${grep_filters[1]}" | cut -d: -f1 +} + +# Sets provided spec version +# input: version +set_spec_versions() { + NEW_VERSION=$1 + runtimes_list=(${@:2}) + + printf "Setting spec_version to $NEW_VERSION\n" + + for f in ${runtimes_list[@]}; do + printf " processing $f" + sed -ri "s/spec_version: [0-9]+_[0-9]+_[0-9]+,/spec_version: $NEW_VERSION,/" $f + done + + commit_with_message "Bump spec_version to $NEW_VERSION" + + git_show_log 'spec_version' +} + +# Displays formatted results of the git log command +# for the given pattern which needs to be found in logs +# input: pattern, count (optional, default is 10) +git_show_log() { + PATTERN="$1" + COUNT=${2:-10} + git log --pretty=format:"%h %ad | %s%d [%an]" --graph --date=iso-strict | \ + head -n $COUNT | grep -iE "$PATTERN" --color=always -z +} + +# Get a spec_version number from the crate version +# +# ## inputs +# - v1.12.0 or 1.12.0 +# +# ## output: +# 1_012_000 or 1_012_001 if SUFFIX is set +function get_spec_version() { + INPUT=$1 + SUFFIX=${SUFFIX:-000} # this variable makes it possible to set a specific runtime version like 93826; it can be initialised as a system variable + [[ $INPUT =~ .*([0-9]+\.[0-9]+\.[0-9]{1,2}).* ]] + VERSION="${BASH_REMATCH[1]}" + MATCH="${BASH_REMATCH[0]}" + if [ -z $MATCH ]; then + return 1 + else + SPEC_VERSION="$(sed -e "s/\./_0/g" -e "s/_[^_]*\$/_$SUFFIX/" <<< $VERSION)" + echo "$SPEC_VERSION" + return 0 + fi +} + +# Reorganize the prdoc files for the release +# +# input: VERSION (e.g. v1.0.0) +# output: none +reorder_prdocs() { + VERSION="$1" + + printf "[+] ℹ️ Reordering prdocs:" + + VERSION=$(sed -E 's/^v([0-9]+\.[0-9]+\.[0-9]+).*$/\1/' <<< "$VERSION") # getting rid of the 'v' prefix + mkdir -p "prdoc/$VERSION" + mv prdoc/pr_*.prdoc prdoc/$VERSION + git add -A + commit_with_message "Reordering prdocs for the release $VERSION" +} + +# Bump the binary version of the polkadot-parachain binary to the +# new bumped version and commit changes. +# +# input: version e.g. 1.16.0 +set_polkadot_parachain_binary_version() { + bumped_version="$1" + cargo_toml_file="$2" + + set_version "\(^version = \)\".*\"" $bumped_version $cargo_toml_file + + cargo update --workspace --offline # we need this to update Cargo.lock with the new versions as well + + MESSAGE="Bump versions in: ${cargo_toml_file}" + commit_with_message "$MESSAGE" + git_show_log "$MESSAGE" +} diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-branchoff-stable.yml new file mode 100644 index 000000000000..c236a66a9fae --- /dev/null +++ b/.github/workflows/release-branchoff-stable.yml @@ -0,0 +1,105 @@ +name: Release - Branch off stable branch + +on: + workflow_dispatch: + inputs: + stable_version: + description: New stable version in the format stableYYMM + required: true + type: string + + node_version: + description: Version of the polkadot node in the format vX.XX.X (e.g.
1.15.0) + required: true + +jobs: + # TODO: Activate this job when the pipeline is moved to the fork in the `paritytech-release` org + # check-workflow-can-run: + # uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@latest + + + prepare-tooling: + runs-on: ubuntu-latest + outputs: + node_version: ${{ steps.validate_inputs.outputs.node_version }} + stable_version: ${{ steps.validate_inputs.outputs.stable_version }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + node_version=$(filter_version_from_input "${{ inputs.node_version }}") + echo "node_version=${node_version}" >> $GITHUB_OUTPUT + + stable_version=$(validate_stable_tag ${{ inputs.stable_version }}) + echo "stable_version=${stable_version}" >> $GITHUB_OUTPUT + + create-stable-branch: + # needs: [check-workflow-can-run, prepare-tooling] + needs: [prepare-tooling] + # if: needs.check-workflow-can-run.outputs.checks_passed == 'true' + runs-on: ubuntu-latest + + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + STABLE_BRANCH_NAME: ${{ needs.prepare-tooling.outputs.stable_version }} + + steps: + - name: Install pgpkms + run: | + # Install pgpkms that is used to sign commits + pip install git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69 + + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: master + + - name: Import gpg keys + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + + - name: Config git + run: | + git config --global commit.gpgsign true + git config --global gpg.program /home/runner/.local/bin/pgpkms-git + git config --global user.name "ParityReleases" + git config --global user.email "release-team@parity.io" + git config --global user.signingKey "90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE" + + - name: Create stable branch + run: | + git checkout -b "$STABLE_BRANCH_NAME" + git show-ref "$STABLE_BRANCH_NAME" + + - name: Bump versions, reorder prdocs and push stable branch + run: | + .
./.github/scripts/release/release_lib.sh + + NODE_VERSION="${{ needs.prepare-tooling.outputs.node_version }}" + set_version "\(NODE_VERSION[^=]*= \)\".*\"" $NODE_VERSION "polkadot/node/primitives/src/lib.rs" + commit_with_message "Bump node version to $NODE_VERSION in polkadot-cli" + + SPEC_VERSION=$(get_spec_version $NODE_VERSION) + runtimes_list=$(get_filtered_runtimes_list) + set_spec_versions $SPEC_VERSION "${runtimes_list[@]}" + + # TODO: clarify what to do with the polkadot-parachain binary + # Set new version for polkadot-parachain binary to match the polkadot node binary + # set_polkadot_parachain_binary_version $NODE_VERSION "cumulus/polkadot-parachain/Cargo.toml" + + reorder_prdocs $NODE_VERSION + + git push origin "$STABLE_BRANCH_NAME" From 986e7ae4f29f804ee4dc89aaf52984d6eda5bd0b Mon Sep 17 00:00:00 2001 From: Radha <86818441+DrW3RK@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:30:49 +0200 Subject: [PATCH 07/66] Update Templates Readme - Github Repo links (#5381) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When someone downloads the Polkadot SDK repo and navigates to the templates folder, the Readme instructions do not work. There is a getting started script of the Polkadot SDK readme which can be overlooked (and also it covers only minimal template and not the parachain/solochain templates). The instructions of the Readme files are updated such that they work for anyone on https://github.com/paritytech/polkadot-sdk https://github.com/paritytech/polkadot-sdk-minimal-template https://github.com/paritytech/polkadot-sdk-parachain-template https://github.com/paritytech/polkadot-sdk-solochain-template --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Bastian Köcher --- templates/minimal/README.md | 8 ++++++++ templates/parachain/README.md | 8 ++++++++ templates/solochain/README.md | 10 +++++++++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/templates/minimal/README.md b/templates/minimal/README.md index 180c229e744e..fe1317a033c7 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -37,6 +37,14 @@ A Polkadot SDK based project such as this one consists of: * 🛠️ Depending on your operating system and Rust version, there might be additional packages required to compile this template - please take note of the Rust compiler output. +Fetch minimal template code: + +```sh +git clone https://github.com/paritytech/polkadot-sdk-minimal-template.git minimal-template + +cd minimal-template +``` + ### Build 🔨 Use the following command to build the node without launching it: diff --git a/templates/parachain/README.md b/templates/parachain/README.md index b912d8e005c7..3de85cbeb4dc 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -39,6 +39,14 @@ A Polkadot SDK based project such as this one consists of: * 🛠️ Depending on your operating system and Rust version, there might be additional packages required to compile this template - please take note of the Rust compiler output. 
+Fetch parachain template code: + +```sh +git clone https://github.com/paritytech/polkadot-sdk-parachain-template.git parachain-template + +cd parachain-template +``` + ### Build 🔨 Use the following command to build the node without launching it: diff --git a/templates/solochain/README.md b/templates/solochain/README.md index 6a5a7853f9c0..c4ce5c7f3fbb 100644 --- a/templates/solochain/README.md +++ b/templates/solochain/README.md @@ -23,9 +23,17 @@ packages required to compile this template. Check the the most common dependencies. Alternatively, you can use one of the [alternative installation](#alternatives-installations) options. +Fetch solochain template code: + +```sh +git clone https://github.com/paritytech/polkadot-sdk-solochain-template.git solochain-template + +cd solochain-template +``` + ### Build -Use the following command to build the node without launching it: +🔨 Use the following command to build the node without launching it: ```sh cargo build --release From 5040b3c2186308a06bad408643a5e475df4cfeeb Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Fri, 6 Sep 2024 13:29:16 +0200 Subject: [PATCH 08/66] Fix PVF precompilation for Kusama (#5606) ![image](https://github.com/user-attachments/assets/2deaee85-67c3-4119-b0c0-d2e7f818b4ea) Because on Kusama validators.len() < discovery_keys.len() we can tweak the PVF precompilation to allow prepare PVFs when the node is an authority but not a validator. --- .../node/core/candidate-validation/src/lib.rs | 11 +++++----- .../core/candidate-validation/src/tests.rs | 21 +++++++++---------- prdoc/pr_5606.prdoc | 13 ++++++++++++ 3 files changed, 29 insertions(+), 16 deletions(-) create mode 100644 prdoc/pr_5606.prdoc diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 103d29e8d269..a9732e934414 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -47,7 +47,7 @@ use polkadot_primitives::{ }, AuthorityDiscoveryId, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateReceipt, ExecutorParams, Hash, OccupiedCoreAssumption, PersistedValidationData, - PvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, ValidationCodeHash, + PvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, ValidationCodeHash, ValidatorId, }; use sp_application_crypto::{AppCrypto, ByteArray}; use sp_keystore::KeystorePtr; @@ -427,14 +427,15 @@ where .iter() .any(|v| keystore.has_keys(&[(v.to_raw_vec(), AuthorityDiscoveryId::ID)])); - let is_present_authority = session_info - .discovery_keys + // We could've checked discovery_keys but on Kusama validators.len() < discovery_keys.len(). + let is_present_validator = session_info + .validators .iter() - .any(|v| keystore.has_keys(&[(v.to_raw_vec(), AuthorityDiscoveryId::ID)])); + .any(|v| keystore.has_keys(&[(v.to_raw_vec(), ValidatorId::ID)])); // There is still a chance to be a previous session authority, but this extra work does not // affect the finalization. - is_past_present_or_future_authority && !is_present_authority + is_past_present_or_future_authority && !is_present_validator } // Sends PVF with unknown code hashes to the validation host returning the list of code hashes sent. 
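In condensed form, the gating rule this patch introduces behaves like the following sketch. It is illustrative only: plain string keys stand in for the real keystore lookups against `AuthorityDiscoveryId` and `ValidatorId`, and the past/present/future authority window is collapsed into a single set:

```rust
use std::collections::HashSet;

// Illustrative model: on Kusama, `validators` is a strict subset of
// `discovery_keys`, so a node can hold an authority-discovery key for a
// session without being an active validator in it.
fn should_precompile_pvfs(
    our_key: &str,
    validators: &HashSet<&str>,
    discovery_keys: &HashSet<&str>,
) -> bool {
    let is_authority = discovery_keys.contains(our_key);
    let is_active_validator = validators.contains(our_key);
    // Prepare PVFs ahead of time only for authorities that are not active
    // validators; active validators compile PVFs on demand anyway.
    is_authority && !is_active_validator
}

fn main() {
    let validators = HashSet::from(["alice"]);
    let discovery_keys = HashSet::from(["alice", "bob"]);
    // An active validator skips ahead-of-time preparation...
    assert!(!should_precompile_pvfs("alice", &validators, &discovery_keys));
    // ...while an authority outside the validator set precompiles.
    assert!(should_precompile_pvfs("bob", &validators, &discovery_keys));
    println!("ok");
}
```

The old check used `discovery_keys` for both sides of the condition, which on Kusama wrongly classified some authorities as active validators; the tests below exercise exactly this distinction.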
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 55282fdf4ee1..0dcd84bab6cf 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -25,13 +25,12 @@ use polkadot_node_subsystem::messages::AllMessages; use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ - CoreIndex, GroupIndex, HeadData, Id as ParaId, IndexedVec, SessionInfo, UpwardMessage, - ValidatorId, ValidatorIndex, + CoreIndex, GroupIndex, HeadData, Id as ParaId, SessionInfo, UpwardMessage, ValidatorId, }; use polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, make_valid_candidate_descriptor, }; -use sp_core::testing::TaskExecutor; +use sp_core::{sr25519::Public, testing::TaskExecutor}; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -1194,10 +1193,10 @@ fn dummy_candidate_backed( ) } -fn dummy_session_info(discovery_keys: Vec) -> SessionInfo { +fn dummy_session_info(keys: Vec) -> SessionInfo { SessionInfo { - validators: IndexedVec::::from(vec![]), - discovery_keys, + validators: keys.iter().cloned().map(Into::into).collect(), + discovery_keys: keys.iter().cloned().map(Into::into).collect(), assignment_keys: vec![], validator_groups: Default::default(), n_cores: 4u32, @@ -1246,7 +1245,7 @@ fn maybe_prepare_validation_golden_path() { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()])))); } ); @@ -1364,7 +1363,7 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 2); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()])))); } ); }; @@ -1510,7 +1509,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()])))); } ); }; @@ -1557,7 +1556,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_ ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Alice.public().into()])))); + let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Alice.public()])))); } ); }; @@ -1604,7 +1603,7 @@ fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => { assert_eq!(index, 1); - let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()])))); + let _ = 
tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()])))); } ); diff --git a/prdoc/pr_5606.prdoc b/prdoc/pr_5606.prdoc new file mode 100644 index 000000000000..46883c5722cd --- /dev/null +++ b/prdoc/pr_5606.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix PVF precompilation for Kusama + +doc: + - audience: Node Operator + description: | + Tweaks the PVF precompilation on Kusama to allow preparing PVFs when the node is an authority but not a validator. + +crates: + - name: polkadot-node-core-candidate-validation + bump: patch From 5fdc0369758989a7d05c95de76183d63240fdaec Mon Sep 17 00:00:00 2001 From: Alistair Singh Date: Fri, 6 Sep 2024 14:28:44 +0200 Subject: [PATCH 09/66] Do not allow changes to token_id locations once set --- bridges/snowbridge/pallets/system/src/lib.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 16e9a6bb6361..092eb1addb47 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -733,17 +733,18 @@ pub mod pallet { pays_fee: PaysFee, ) -> Result<(), DispatchError> { let bridge_location = Location::new(2, [GlobalConsensus(T::EthereumNetwork::get())]); - let mut location = location.clone(); - location - .reanchor(&bridge_location, &T::UniversalLocation::get()) + let location = location + .clone() + .reanchored(&bridge_location, &T::UniversalLocation::get()) .map_err(|_| Error::<T>::LocationConversionFailed)?; - // Record the token id or fail if it has already been created let token_id = TokenIdOf::convert_location(&location) .ok_or(Error::<T>::LocationConversionFailed)?; - ForeignToNativeId::<T>::insert(token_id, location.clone()); - NativeToForeignId::<T>::insert(location.clone(), token_id); + if (!ForeignToNativeId::<T>::contains_key(token_id)) { + ForeignToNativeId::<T>::insert(token_id, location.clone()); + NativeToForeignId::<T>::insert(location.clone(), token_id); + } let command = Command::RegisterForeignToken { token_id, From b395eafc4aa53042606fbda6fb7a7ba4e287fbb8 Mon Sep 17 00:00:00 2001 From: Alistair Singh Date: Fri, 6 Sep 2024 14:31:42 +0200 Subject: [PATCH 10/66] Fix syntax --- bridges/snowbridge/pallets/system/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 092eb1addb47..d935bb78579e 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -741,9 +741,9 @@ pub mod pallet { let token_id = TokenIdOf::convert_location(&location) .ok_or(Error::<T>::LocationConversionFailed)?; - if (!ForeignToNativeId::<T>::contains_key(token_id)) { - ForeignToNativeId::<T>::insert(token_id, location.clone()); + if !ForeignToNativeId::<T>::contains_key(token_id) { NativeToForeignId::<T>::insert(location.clone(), token_id); + ForeignToNativeId::<T>::insert(token_id, location.clone()); } let command = Command::RegisterForeignToken { From b2089d88bd6ef40ca17ddfa097b05b257cdcdf13 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 6 Sep 2024 15:56:29 +0200 Subject: [PATCH 11/66] [ci] Fix final job for required workflows (#5619) Currently, if a required job fails, the final job is skipped, which breaks the logic of required jobs. This PR fixes it.
Closes https://github.com/paritytech/ci_cd/issues/1033 --- .github/workflows/build-misc.yml | 24 ++++++----- .../workflows/check-cargo-check-runtimes.yml | 14 ++++-- .github/workflows/check-runtime-migration.yml | 13 +++++- .github/workflows/checks.yml | 13 +++++- .github/workflows/tests-linux-stable.yml | 11 ++++- .github/workflows/tests-misc.yml | 43 +++++++++++-------- 6 files changed, 83 insertions(+), 35 deletions(-) diff --git a/.github/workflows/build-misc.yml b/.github/workflows/build-misc.yml index c85549b37999..a01384dc002c 100644 --- a/.github/workflows/build-misc.yml +++ b/.github/workflows/build-misc.yml @@ -5,10 +5,9 @@ on: branches: - master pull_request: - types: [ opened, synchronize, reopened, ready_for_review ] + types: [opened, synchronize, reopened, ready_for_review] merge_group: - concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true @@ -41,7 +40,7 @@ jobs: build-runtimes-polkavm: timeout-minutes: 20 - needs: [ set-image ] + needs: [set-image] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -65,7 +64,7 @@ jobs: build-subkey: timeout-minutes: 20 - needs: [ set-image ] + needs: [set-image] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -89,10 +88,15 @@ jobs: runs-on: ubuntu-latest name: All build misc jobs passed # If any new job gets added, be sure to add it to this array - needs: - [ - build-runtimes-polkavm, - build-subkey - ] + needs: [build-runtimes-polkavm, build-subkey] + if: always() && !cancelled() steps: - - run: echo '### Good job! All the build misc tests passed 🚀' >> $GITHUB_STEP_SUMMARY \ No newline at end of file + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/check-cargo-check-runtimes.yml b/.github/workflows/check-cargo-check-runtimes.yml index ebcf6c5fc9bd..6325033d214c 100644 --- a/.github/workflows/check-cargo-check-runtimes.yml +++ b/.github/workflows/check-cargo-check-runtimes.yml @@ -2,8 +2,7 @@ name: Check Cargo Check Runtimes on: pull_request: - types: [ opened, synchronize, reopened, ready_for_review, labeled ] - + types: [opened, synchronize, reopened, ready_for_review, labeled] # Jobs in this workflow depend on each other, only for limiting peak amount of spawned workers @@ -132,5 +131,14 @@ jobs: - check-runtime-contracts - check-runtime-starters - check-runtime-testing + if: always() && !cancelled() steps: - - run: echo '### Good job! All the tests passed 🚀' >> $GITHUB_STEP_SUMMARY + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! 
All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 5fb9dca38d17..0a1dbc4790c8 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -46,7 +46,7 @@ jobs: # We need to set this to rather long to allow the snapshot to be created, but the average time # should be much lower. timeout-minutes: 60 - needs: [ set-image ] + needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} strategy: @@ -162,5 +162,14 @@ jobs: name: All runtime migrations passed # If any new job gets added, be sure to add it to this array needs: [check-runtime-migration] + if: always() && !cancelled() steps: - - run: echo '### Good job! All the checks passed 🚀' >> $GITHUB_STEP_SUMMARY + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 9aebd83282e3..9de879d83676 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -12,7 +12,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: { } +permissions: {} jobs: # temporary disabled because currently doesn't work in merge queue @@ -105,5 +105,14 @@ jobs: name: All checks passed # If any new job gets added, be sure to add it to this array needs: [cargo-clippy, check-try-runtime, check-core-crypto-features] + if: always() && !cancelled() steps: - - run: echo '### Good job! All the checks passed 🚀' >> $GITHUB_STEP_SUMMARY + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 997d7622f0c3..7ed67703395f 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -138,5 +138,14 @@ jobs: test-linux-stable-runtime-benchmarks, test-linux-stable, ] + if: always() && !cancelled() steps: - - run: echo '### Good job! All the tests passed 🚀' >> $GITHUB_STEP_SUMMARY + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! 
All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 2e78f4a34ede..9aa6bf23727f 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -5,7 +5,7 @@ on: branches: - master pull_request: - types: [ opened, synchronize, reopened, ready_for_review ] + types: [opened, synchronize, reopened, ready_for_review] merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -43,12 +43,12 @@ jobs: echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT else echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT - fi - + fi + # more information about this job can be found here: # https://github.com/paritytech/substrate/pull/3778 test-full-crypto-feature: - needs: [ set-image ] + needs: [set-image] runs-on: ${{ needs.set-image.outputs.RUNNER }} timeout-minutes: 60 container: @@ -72,7 +72,7 @@ jobs: test-frame-examples-compile-to-wasm: timeout-minutes: 20 # into one job - needs: [ set-image, test-full-crypto-feature ] + needs: [set-image, test-full-crypto-feature] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -93,7 +93,7 @@ jobs: test-frame-ui: timeout-minutes: 60 - needs: [ set-image, test-frame-examples-compile-to-wasm ] + needs: [set-image, test-frame-examples-compile-to-wasm] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -121,7 +121,7 @@ jobs: test-deterministic-wasm: timeout-minutes: 20 - needs: [ set-image ] + needs: [set-image] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -143,7 +143,7 @@ jobs: sha256sum -c checksum.sha256 cargo-check-benches-branches: - needs: [ set-image ] + needs: [set-image] if: ${{ github.event_name == 'pull_request' || github.event_name == 'merge_group' }} timeout-minutes: 60 outputs: @@ -156,7 +156,7 @@ jobs: echo "branch=['${{ github.base_ref }}', '${{ github.head_ref }}']" >> $GITHUB_OUTPUT cargo-check-benches: - needs: [ set-image, cargo-check-benches-branches ] + needs: [set-image, cargo-check-benches-branches] timeout-minutes: 60 strategy: matrix: @@ -191,7 +191,7 @@ jobs: node-bench-regression-guard: timeout-minutes: 20 runs-on: arc-runners-polkadot-sdk - needs: [ set-image, cargo-check-benches ] + needs: [set-image, cargo-check-benches] steps: - name: Checkout uses: actions/checkout@v4.1.7 @@ -227,7 +227,7 @@ jobs: fi test-node-metrics: - needs: [ set-image ] + needs: [set-image] timeout-minutes: 30 runs-on: ${{ needs.set-image.outputs.RUNNER }} container: @@ -260,7 +260,7 @@ jobs: # https://github.com/paritytech/substrate/pull/6916 check-tracing: timeout-minutes: 20 - needs: [ set-image, test-node-metrics ] + needs: [set-image, test-node-metrics] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -275,7 +275,7 @@ jobs: check-metadata-hash: timeout-minutes: 20 - needs: [ set-image, check-tracing ] + needs: [set-image, check-tracing] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -289,7 +289,7 @@ jobs: cargo-hfuzz: timeout-minutes: 20 - needs: [ set-image, check-metadata-hash ] + needs: [set-image, check-metadata-hash] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -329,7 +329,7 @@ jobs: cargo-check-each-crate: 
timeout-minutes: 140 - needs: [ set-image ] + needs: [set-image] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -338,7 +338,7 @@ jobs: CI_JOB_NAME: cargo-check-each-crate strategy: matrix: - index: [ 1,2,3,4,5,6,7 ] # 7 parallel jobs + index: [1, 2, 3, 4, 5, 6, 7] # 7 parallel jobs steps: - name: Checkout uses: actions/checkout@v4.1.7 @@ -394,5 +394,14 @@ jobs: - cargo-check-each-crate - test-deterministic-wasm # - cargo-hfuzz remove from required for now, as it's flaky + if: always() && !cancelled() steps: - - run: echo '### Good job! All the required tests passed 🚀' >> $GITHUB_STEP_SUMMARY \ No newline at end of file + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi From c0ce942513d27e15f4f084cb4f02bf718502c0ed Mon Sep 17 00:00:00 2001 From: ron Date: Fri, 6 Sep 2024 22:25:17 +0800 Subject: [PATCH 12/66] Use Blake2_* hasher instead --- bridges/snowbridge/pallets/system/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 16e9a6bb6361..f2b8cce1def2 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -269,12 +269,12 @@ pub mod pallet { /// Lookup table for foreign to native token ID #[pallet::storage] pub type ForeignToNativeId = - StorageMap<_, Twox64Concat, TokenId, xcm::v4::Location, OptionQuery>; + StorageMap<_, Blake2_128Concat, TokenId, xcm::v4::Location, OptionQuery>; /// Lookup table for native to foreign token ID #[pallet::storage] pub type NativeToForeignId = - StorageMap<_, Twox64Concat, xcm::v4::Location, TokenId, OptionQuery>; + StorageMap<_, Blake2_128Concat, xcm::v4::Location, TokenId, OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] From 365d9928a8e71bf3467f06f89a1786f8d3d349c4 Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Fri, 6 Sep 2024 16:25:48 +0100 Subject: [PATCH 13/66] Update tests-misc.yml (#5615) Fixes https://github.com/paritytech/ci_cd/issues/1032 --------- Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> --- .github/workflows/tests-misc.yml | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 9aa6bf23727f..8e8f5770e92d 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -142,25 +142,13 @@ jobs: # confirm checksum sha256sum -c checksum.sha256 - cargo-check-benches-branches: + cargo-check-benches: needs: [set-image] if: ${{ github.event_name == 'pull_request' || github.event_name == 'merge_group' }} timeout-minutes: 60 - outputs: - branch: ${{ steps.branch.outputs.branch }} - runs-on: ubuntu-latest - steps: - - name: Branch - id: branch - run: | - echo "branch=['${{ github.base_ref }}', '${{ github.head_ref }}']" >> $GITHUB_OUTPUT - - cargo-check-benches: - needs: [set-image, cargo-check-benches-branches] - timeout-minutes: 60 strategy: matrix: - branch: ${{ fromJSON(needs.cargo-check-benches-branches.outputs.branch) }} + branch: [ master, current ] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: 
image: ${{ needs.set-image.outputs.IMAGE }} @@ -168,7 +156,9 @@ jobs: - name: Checkout uses: actions/checkout@v4 with: - ref: ${{ matrix.branch }} + # if branch is master, use the branch, otherwise set empty string, so it uses the current context + # either PR (including forks) or merge group (main repo) + ref: ${{ matrix.branch == 'master' && matrix.branch || '' }} - name: script run: | @@ -190,6 +180,7 @@ jobs: node-bench-regression-guard: timeout-minutes: 20 + if: always() && !cancelled() runs-on: arc-runners-polkadot-sdk needs: [set-image, cargo-check-benches] steps: @@ -199,13 +190,13 @@ jobs: - name: Download artifact (master run) uses: actions/download-artifact@v4.1.8 with: - name: cargo-check-benches-${{ github.base_ref }}-${{ github.sha }} + name: cargo-check-benches-master-${{ github.sha }} path: ./artifacts/master - name: Download artifact (current run) uses: actions/download-artifact@v4.1.8 with: - name: cargo-check-benches-${{ github.head_ref }}-${{ github.sha }} + name: cargo-check-benches-current-${{ github.sha }} path: ./artifacts/current - name: script From c94da518d0165a222d978925784ddea3b0c2fa19 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 08:49:32 +0800 Subject: [PATCH 14/66] Fix to reanchor in context of Ethereum --- .../primitives/router/src/inbound/tests.rs | 95 ++++++------------- .../src/bridge_to_ethereum_config.rs | 5 +- .../src/bridge_to_ethereum_config.rs | 6 +- 3 files changed, 37 insertions(+), 69 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs index 7b4469c1a68c..1c018f3ab0c4 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs @@ -40,68 +40,35 @@ fn test_contract_location_with_incorrect_location_fails_convert() { } #[test] -fn test_reanchor_relay_token() { - let asset_id: Location = Location::parent(); - let ah_context: InteriorLocation = [GlobalConsensus(Westend), Parachain(1000)].into(); - let ethereum = Location::new(2, [GlobalConsensus(Ethereum { chain_id: 1 })]); - let mut reanchored_asset = asset_id.clone(); - assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); - assert_eq!( - reanchored_asset, - Location { parents: 1, interior: [GlobalConsensus(Westend)].into() } - ); - let bh_context: InteriorLocation = [GlobalConsensus(Westend), Parachain(1002)].into(); - let ah = Location::new(1, [GlobalConsensus(Westend), Parachain(1000)]); - let mut reanchored_asset = reanchored_asset.clone(); - assert_ok!(reanchored_asset.reanchor(&ah, &bh_context)); - assert_eq!(reanchored_asset, asset_id); -} - -#[test] -fn test_reanchor_pna_from_ah() { - let asset_id_in_ah: Location = - Location { parents: 0, interior: [PalletInstance(50), GeneralIndex(2)].into() }; - let asset_id: Location = Location { - parents: 1, - interior: [Parachain(1000), PalletInstance(50), GeneralIndex(2)].into(), - }; - let bh_context: InteriorLocation = [GlobalConsensus(Westend), Parachain(1002)].into(); - let ethereum = Location::new(2, [GlobalConsensus(Ethereum { chain_id: 1 })]); - let mut reanchored_asset = asset_id.clone(); - assert_ok!(reanchored_asset.reanchor(ðereum, &bh_context)); - assert_eq!( - reanchored_asset, - Location { - parents: 1, - interior: [ - GlobalConsensus(Westend), - Parachain(1000), - PalletInstance(50), - GeneralIndex(2) - ] - .into() - } - ); - let ah = Location::new(1, [GlobalConsensus(Westend), Parachain(1000)]); - let mut reanchored_asset = reanchored_asset.clone(); - 
assert_ok!(reanchored_asset.reanchor(&ah, &bh_context)); - assert_eq!(reanchored_asset, asset_id_in_ah); -} - -#[test] -fn test_reanchor_pna_from_para() { - let asset_id_in_ah: Location = Location { parents: 1, interior: [Parachain(2000)].into() }; - let asset_id: Location = Location { parents: 1, interior: [Parachain(2000)].into() }; - let bh_context: InteriorLocation = [GlobalConsensus(Westend), Parachain(1002)].into(); - let ethereum = Location::new(2, [GlobalConsensus(Ethereum { chain_id: 1 })]); - let mut reanchored_asset = asset_id.clone(); - assert_ok!(reanchored_asset.reanchor(ðereum, &bh_context)); - assert_eq!( - reanchored_asset, - Location { parents: 1, interior: [GlobalConsensus(Westend), Parachain(2000)].into() } - ); - let ah = Location::new(1, [GlobalConsensus(Westend), Parachain(1000)]); - let mut reanchored_asset = reanchored_asset.clone(); - assert_ok!(reanchored_asset.reanchor(&ah, &bh_context)); - assert_eq!(reanchored_asset, asset_id_in_ah); +fn test_reanchor_all_assets() { + let ethereum_context: InteriorLocation = [GlobalConsensus(Ethereum { chain_id: 1 })].into(); + let ethereum = Location::new(2, ethereum_context.clone()); + let ah_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); + let global_ah = Location::new(1, ah_context.clone()); + let bh_context: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1002)].into(); + let assets = vec![ + // DOT + Location::new(1, []), + // GLMR (Some Polkadot parachain currency) + Location::new(1, [Parachain(2004)]), + // AH asset + Location::new(0, [PalletInstance(50), GeneralIndex(42)]), + // KSM + Location::new(2, [GlobalConsensus(Kusama)]), + // KAR (Some Kusama parachain currency) + Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), + ]; + for asset in assets.iter() { + // reanchor logic in pallet_xcm on AH + let mut reanchored_asset = asset.clone(); + assert_ok!(reanchored_asset.reanchor(ðereum, &ah_context)); + // reanchor back to original location in context of Ethereum + let mut reanchored_asset_with_ethereum_context = reanchored_asset.clone(); + assert_ok!(reanchored_asset_with_ethereum_context.reanchor(&global_ah, ðereum_context)); + assert_eq!(reanchored_asset_with_ethereum_context, asset.clone()); + // reanchor back to original location in context of BH + let mut reanchored_asset_with_bh_context = reanchored_asset.clone(); + assert_ok!(reanchored_asset_with_bh_context.reanchor(&global_ah, &bh_context)); + assert_eq!(reanchored_asset_with_bh_context, asset.clone()); + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index 1e096f19ef80..fde214ed42d6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -41,7 +41,7 @@ use sp_runtime::{ traits::{ConstU32, ConstU8, Keccak256}, FixedU128, }; -use xcm::prelude::{GlobalConsensus, Location, Parachain}; +use xcm::prelude::{GlobalConsensus, InteriorLocation, Location, Parachain}; /// Exports message to the Ethereum Gateway contract. pub type SnowbridgeExporter = EthereumBlobExporter< @@ -67,6 +67,7 @@ parameter_types! 
{ multiplier: FixedU128::from_rational(1, 1), }; pub GlobalAssetHub: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(rococo_runtime_constants::system_parachain::ASSET_HUB_ID)]); + pub EthereumUniversalLocation: InteriorLocation = [GlobalConsensus(EthereumNetwork::get())].into(); } impl snowbridge_pallet_inbound_queue::Config for Runtime { @@ -88,7 +89,7 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { AccountId, Balance, EthereumSystem, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHub, >; type WeightToFee = WeightToFee; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index fee9f9a2f610..4b3f61a42ad4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -42,7 +42,7 @@ use sp_runtime::{ traits::{ConstU32, ConstU8, Keccak256}, FixedU128, }; -use xcm::prelude::{GlobalConsensus, Location, Parachain}; +use xcm::prelude::{GlobalConsensus, InteriorLocation, Location, Parachain}; pub const SLOTS_PER_EPOCH: u32 = snowbridge_pallet_ethereum_client::config::SLOTS_PER_EPOCH as u32; @@ -70,8 +70,8 @@ parameter_types! { multiplier: FixedU128::from_rational(1, 1), }; pub GlobalAssetHub: Location = Location::new(1,[GlobalConsensus(RelayNetwork::get()),Parachain(westend_runtime_constants::system_parachain::ASSET_HUB_ID)]); + pub EthereumUniversalLocation: InteriorLocation = [GlobalConsensus(EthereumNetwork::get())].into(); } - impl snowbridge_pallet_inbound_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Verifier = snowbridge_pallet_ethereum_client::Pallet; @@ -91,7 +91,7 @@ impl snowbridge_pallet_inbound_queue::Config for Runtime { AccountId, Balance, EthereumSystem, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHub, >; type WeightToFee = WeightToFee; From 176c7af58f73c8e78c004d35e7a30f3a9faa1d5f Mon Sep 17 00:00:00 2001 From: Ron Date: Sat, 7 Sep 2024 09:38:42 +0800 Subject: [PATCH 15/66] Update bridges/snowbridge/pallets/system/src/tests.rs Co-authored-by: Francisco Aguirre --- bridges/snowbridge/pallets/system/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 0745f435ba05..d7493e02acb2 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -633,7 +633,7 @@ fn no_genesis_build_is_uninitialized() { } #[test] -fn register_token_with_root_yeilds_success() { +fn register_token_with_root_yields_success() { new_test_ext(true).execute_with(|| { let origin = RuntimeOrigin::root(); let location = Location::new(1, []); From 0944d8a034404372e990edf6d29b62bf7fe5a772 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 09:46:00 +0800 Subject: [PATCH 16/66] Use Polkadot as RelayNetwork in tests --- bridges/snowbridge/pallets/system/src/mock.rs | 2 +- bridges/snowbridge/pallets/system/src/tests.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index d8c905449d9a..72605ea2283e 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -166,7 +166,7 @@ impl 
snowbridge_pallet_outbound_queue::Config for Test { parameter_types! { pub const SS58Prefix: u8 = 42; pub const AnyNetwork: Option = None; - pub const RelayNetwork: Option = Some(NetworkId::Kusama); + pub const RelayNetwork: Option = Some(NetworkId::Polkadot); pub const RelayLocation: Location = Location::parent(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(1013)].into(); diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 0745f435ba05..4f24b0eb9856 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -647,8 +647,8 @@ fn register_token_with_root_yeilds_success() { assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); let expected_token_id = - hex!("03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852").into(); - let expected_location = Location::new(1, [GlobalConsensus(Kusama)]); + hex!("4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2").into(); + let expected_location = Location::new(1, [GlobalConsensus(Polkadot)]); System::assert_last_event(RuntimeEvent::EthereumSystem( crate::Event::::RegisterToken { @@ -674,8 +674,8 @@ fn register_token_with_relative_address_reanchors_to_ethereum_and_succeeds() { assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); let expected_token_id = - hex!("03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852").into(); - let expected_location = Location::new(1, [GlobalConsensus(Kusama)]); + hex!("4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2").into(); + let expected_location = Location::new(1, [GlobalConsensus(Polkadot)]); System::assert_last_event(RuntimeEvent::EthereumSystem( crate::Event::::RegisterToken { From 0fa95ac5ac64a2f546e1c88f183781ea8b31c747 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 10:53:21 +0800 Subject: [PATCH 17/66] Refactor relative locations tests --- bridges/snowbridge/pallets/system/src/mock.rs | 1 + .../snowbridge/pallets/system/src/tests.rs | 251 +++++++----------- 2 files changed, 98 insertions(+), 154 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 72605ea2283e..f70641288250 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -171,6 +171,7 @@ parameter_types! 
{ pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(1013)].into(); pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub EthereumDestination: Location = Location::new(2,[GlobalConsensus(EthereumNetwork::get())]); } pub const DOT: u128 = 10_000_000_000; diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 574dd4c53bb7..3920d1c7e2ad 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -633,118 +633,10 @@ fn no_genesis_build_is_uninitialized() { } #[test] -fn register_token_with_root_yields_success() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(1, []); - let versioned_location: Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2").into(); - let expected_location = Location::new(1, [GlobalConsensus(Polkadot)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); - }); -} - -#[test] -fn register_token_with_relative_address_reanchors_to_ethereum_and_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(1, []); - let versioned_location: Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2").into(); - let expected_location = Location::new(1, [GlobalConsensus(Polkadot)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); - }); -} - -#[test] -fn register_token_with_complex_location_simplifies_and_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(2, [GlobalConsensus(Kusama)]); - let versioned_location: Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852").into(); - let expected_location = Location::new(1, [GlobalConsensus(Kusama)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); - }); -} - -#[test] -fn register_token_with_doubled_bridged_polkadot_location_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(2, [GlobalConsensus(Rococo)]); - let versioned_location: 
Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id = - hex!("62e8f33b7fb0e7e2d2276564061a2f3c7bcb612e733b8bf5733ea16cee0ecba6").into(); - let expected_location = Location::new(1, [GlobalConsensus(Rococo)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); - }); -} - -#[test] -fn register_token_with_ethereum_address_reanchors_to_relative_and_fails() { +fn register_token_with_signed_yeilds_bad_origin() { new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - let location = Location::new(2, [GlobalConsensus(Ethereum { chain_id: 11155111 })]); + let origin = RuntimeOrigin::signed([14; 32].into()); + let location = Location::new(1, [Parachain(2000)]); let versioned_location: Box = Box::new(location.clone().into()); let asset_metadata = AssetMetadata { decimals: 10, @@ -754,54 +646,105 @@ fn register_token_with_ethereum_address_reanchors_to_relative_and_fails() { assert_noop!( EthereumSystem::register_token(origin, versioned_location, asset_metadata), - Error::::LocationConversionFailed + BadOrigin ); }); } -#[test] -fn register_token_with_double_bridged_ethereum_address_succeeds() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::root(); - const NETWORK: NetworkId = Ethereum { chain_id: 1 }; - let location = Location::new(2, [GlobalConsensus(NETWORK)]); - let versioned_location: Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); - - let expected_token_id: H256 = - hex!("37fd94739deb1c2a8929b45a4f70ffcb52de8b54791609ee13ee0a2b33730269").into(); - let expected_location = Location::new(1, [GlobalConsensus(NETWORK)]); - - System::assert_last_event(RuntimeEvent::EthereumSystem( - crate::Event::::RegisterToken { - location: expected_location.into(), - foreign_token_id: expected_token_id, - }, - )); - }); +pub struct TokenInfo { + pub location: Location, + pub metadata: AssetMetadata, + pub foreign_token_id: TokenId, } #[test] -fn register_token_with_signed_yeilds_bad_origin() { - new_test_ext(true).execute_with(|| { - let origin = RuntimeOrigin::signed([14; 32].into()); - let location = Location::new(1, [Parachain(2000)]); - let versioned_location: Box = Box::new(location.clone().into()); - let asset_metadata = AssetMetadata { - decimals: 10, - name: b"Dot".to_vec().try_into().unwrap(), - symbol: b"DOT".to_vec().try_into().unwrap(), - }; - - assert_noop!( - EthereumSystem::register_token(origin, versioned_location, asset_metadata), - BadOrigin - ); - }); +fn register_all_tokens_succeeds() { + let assets = vec![ + // DOT + TokenInfo { + location: Location::parent(), + metadata: AssetMetadata { + decimals: 10, + name: b"DOT".to_vec().try_into().unwrap(), + symbol: b"DOT".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2" + ) + .into(), + }, + // GLMR (Some Polkadot parachain currency) + TokenInfo { + location: Location::new(1, [Parachain(2004)]), + metadata: 
AssetMetadata { + decimals: 10, + name: b"GLMR".to_vec().try_into().unwrap(), + symbol: b"GLMR".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "34c08fc90409b6924f0e8eabb7c2aaa0c749e23e31adad9f6d217b577737fafb" + ) + .into(), + }, + // USDT + TokenInfo { + location: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1084)]), + metadata: AssetMetadata { + decimals: 10, + name: b"USDT".to_vec().try_into().unwrap(), + symbol: b"USDT".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "d49fe2118be0cca618e4d171e60ffea98b7b648dd80dc37d6342116b910b7aa5" + ) + .into(), + }, + // KSM + TokenInfo { + location: Location::new(2, [GlobalConsensus(Kusama)]), + metadata: AssetMetadata { + decimals: 12, + name: b"KSM".to_vec().try_into().unwrap(), + symbol: b"KSM".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852" + ) + .into(), + }, + // KAR (Some Kusama parachain currency) + TokenInfo { + location: Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), + metadata: AssetMetadata { + decimals: 12, + name: b"KAR".to_vec().try_into().unwrap(), + symbol: b"KAR".to_vec().try_into().unwrap(), + }, + foreign_token_id: hex!( + "d3e39ad6ea4cee68c9741181e94098823b2ea34a467577d0875c036f0fce5be0" + ) + .into(), + }, + ]; + for asset in assets.iter() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let versioned_location: Box = + Box::new(asset.location.clone().into()); + let asset_metadata = asset.metadata.clone(); + + assert_ok!(EthereumSystem::register_token(origin, versioned_location, asset_metadata)); + + let location = asset + .location + .clone() + .reanchored(&EthereumDestination::get(), &UniversalLocation::get()) + .unwrap(); + + System::assert_last_event(RuntimeEvent::EthereumSystem(Event::::RegisterToken { + location: location.into(), + foreign_token_id: asset.foreign_token_id, + })); + }); + } } From df60eb864e9d39bb5808d9ea284120bbf3f7a967 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 11:03:29 +0800 Subject: [PATCH 18/66] More tests --- .../snowbridge/pallets/system/src/tests.rs | 32 +++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 3920d1c7e2ad..22b02d51dab2 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -677,7 +677,7 @@ fn register_all_tokens_succeeds() { TokenInfo { location: Location::new(1, [Parachain(2004)]), metadata: AssetMetadata { - decimals: 10, + decimals: 12, name: b"GLMR".to_vec().try_into().unwrap(), symbol: b"GLMR".to_vec().try_into().unwrap(), }, @@ -690,7 +690,7 @@ fn register_all_tokens_succeeds() { TokenInfo { location: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1084)]), metadata: AssetMetadata { - decimals: 10, + decimals: 6, name: b"USDT".to_vec().try_into().unwrap(), symbol: b"USDT".to_vec().try_into().unwrap(), }, @@ -748,3 +748,31 @@ fn register_all_tokens_succeeds() { }); } } + +#[test] +fn register_ethereum_native_token_fails() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let location = Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: 11155111 }), + AccountKey20 { + network: None, + key: hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"), + }, + ], + ); + let versioned_location: Box = Box::new(location.clone().into()); + let 
asset_metadata = AssetMetadata { + decimals: 18, + name: b"WETH".to_vec().try_into().unwrap(), + symbol: b"WETH".to_vec().try_into().unwrap(), + }; + + assert_noop!( + EthereumSystem::register_token(origin, versioned_location, asset_metadata), + Error::::LocationConversionFailed + ); + }); +} From ad9ad230c76a4ec7d387749071368519d46cbd04 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 11:05:11 +0800 Subject: [PATCH 19/66] Fix typo --- bridges/snowbridge/pallets/system/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 22b02d51dab2..d436724caba6 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -633,7 +633,7 @@ fn no_genesis_build_is_uninitialized() { } #[test] -fn register_token_with_signed_yeilds_bad_origin() { +fn register_token_with_signed_yields_bad_origin() { new_test_ext(true).execute_with(|| { let origin = RuntimeOrigin::signed([14; 32].into()); let location = Location::new(1, [Parachain(2000)]); From 388d1be8396925652212d52da6150eba5a32330f Mon Sep 17 00:00:00 2001 From: Ron Date: Sat, 7 Sep 2024 11:18:45 +0800 Subject: [PATCH 20/66] Update bridges/snowbridge/primitives/router/src/inbound/mod.rs Co-authored-by: Adrian Catangiu --- bridges/snowbridge/primitives/router/src/inbound/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index 7935de40ac3d..4aa721728449 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -479,6 +479,7 @@ impl< // Forward message id to Asset Hub. 
instructions.push(SetTopic(message_id.into())); + // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also teleport fees) Ok((instructions.into(), total_fees.into())) } } From 7f110c2d2d7721ec1ea2151f83226bc8ea61cd8d Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 11:26:19 +0800 Subject: [PATCH 21/66] Rename var --- bridges/snowbridge/pallets/system/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 84b41cd0b9ea..fd2f2eb5b3ab 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -732,10 +732,11 @@ pub mod pallet { metadata: AssetMetadata, pays_fee: PaysFee, ) -> Result<(), DispatchError> { - let bridge_location = Location::new(2, [GlobalConsensus(T::EthereumNetwork::get())]); + let ethereum_location = Location::new(2, [GlobalConsensus(T::EthereumNetwork::get())]); + // reanchor to Ethereum context let location = location .clone() - .reanchored(&bridge_location, &T::UniversalLocation::get()) + .reanchored(ðereum_location, &T::UniversalLocation::get()) .map_err(|_| Error::::LocationConversionFailed)?; let token_id = TokenIdOf::convert_location(&location) From d0ac1bffe919226a49210177d6db32c4d1367991 Mon Sep 17 00:00:00 2001 From: Ron Date: Sat, 7 Sep 2024 11:26:42 +0800 Subject: [PATCH 22/66] Update bridges/snowbridge/primitives/core/src/location.rs Co-authored-by: Adrian Catangiu --- bridges/snowbridge/primitives/core/src/location.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/primitives/core/src/location.rs b/bridges/snowbridge/primitives/core/src/location.rs index c126618bba35..07c998a5c40e 100644 --- a/bridges/snowbridge/primitives/core/src/location.rs +++ b/bridges/snowbridge/primitives/core/src/location.rs @@ -29,7 +29,7 @@ pub type AgentIdOf = pub type TokenId = H256; -/// Convert a token location to a stable ID that can be used on the Ethereum side +/// Convert a token location (relative to Ethereum) to a stable ID that can be used on the Ethereum side pub type TokenIdOf = HashedDescription< TokenId, DescribeGlobalPrefix<(DescribeTerminus, DescribeFamily)>, From d65711353dda3fefb6abda5a367c255beddb08d5 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 11:38:50 +0800 Subject: [PATCH 23/66] Make test asset non sufficient --- .../chains/parachains/assets/asset-hub-westend/src/genesis.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index 73c5ef863309..a9cfcda0dacd 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -86,7 +86,7 @@ pub fn genesis() -> Storage { ( PenpalBTeleportableAssetLocation::get(), PenpalBSiblingSovereignAccount::get(), - true, + false, ED, ), ], From 5be8f8224af731e551671cb5f7412e90b9413e46 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 11:41:10 +0800 Subject: [PATCH 24/66] Bump package as minor --- prdoc/pr_5546.prdoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/prdoc/pr_5546.prdoc b/prdoc/pr_5546.prdoc index 16e2bd993463..44c8ee177a6f 100644 --- 
a/prdoc/pr_5546.prdoc +++ b/prdoc/pr_5546.prdoc @@ -13,11 +13,11 @@ crates: - name: snowbridge-pallet-outbound-queue bump: patch - name: snowbridge-pallet-system - bump: major + bump: minor - name: snowbridge-core - bump: major + bump: minor - name: snowbridge-router-primitives - bump: major + bump: minor - name: bridge-hub-westend-runtime bump: patch - name: bridge-hub-rococo-runtime From 8d1c9d7b18e258ac77146b2dc81e879ce818104a Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 11:47:08 +0800 Subject: [PATCH 25/66] Improve comments --- bridges/snowbridge/pallets/system/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index fd2f2eb5b3ab..9712f589b808 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -266,12 +266,12 @@ pub mod pallet { pub type PricingParameters = StorageValue<_, PricingParametersOf, ValueQuery, T::DefaultPricingParameters>; - /// Lookup table for foreign to native token ID + /// Lookup table for foreign token ID to native location relative to ethereum #[pallet::storage] pub type ForeignToNativeId = StorageMap<_, Blake2_128Concat, TokenId, xcm::v4::Location, OptionQuery>; - /// Lookup table for native to foreign token ID + /// Lookup table for native location relative to ethereum to foreign token ID #[pallet::storage] pub type NativeToForeignId = StorageMap<_, Blake2_128Concat, xcm::v4::Location, TokenId, OptionQuery>; From 9761118c357a15666743a84507fea168d9580961 Mon Sep 17 00:00:00 2001 From: ron Date: Sat, 7 Sep 2024 16:13:18 +0800 Subject: [PATCH 26/66] Revert "Bump package as minor" This reverts commit 5be8f8224af731e551671cb5f7412e90b9413e46. --- prdoc/pr_5546.prdoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/prdoc/pr_5546.prdoc b/prdoc/pr_5546.prdoc index 44c8ee177a6f..16e2bd993463 100644 --- a/prdoc/pr_5546.prdoc +++ b/prdoc/pr_5546.prdoc @@ -13,11 +13,11 @@ crates: - name: snowbridge-pallet-outbound-queue bump: patch - name: snowbridge-pallet-system - bump: minor + bump: major - name: snowbridge-core - bump: minor + bump: major - name: snowbridge-router-primitives - bump: minor + bump: major - name: bridge-hub-westend-runtime bump: patch - name: bridge-hub-rococo-runtime From 96fecc3cfcfa91dc797a94f225027a266215d6e5 Mon Sep 17 00:00:00 2001 From: clangenb <37865735+clangenb@users.noreply.github.com> Date: Sat, 7 Sep 2024 12:15:47 +0200 Subject: [PATCH 27/66] Fix occasional `alloc` not found error in `format_runtime_string!` (#5632) The macro hygiene for the `format_runtime_string!` macro was broken since https://github.com/paritytech/polkadot-sdk/pull/5010, which resulted in the following build error under certain circumstances: ```console error[E0433]: failed to resolve: use of undeclared crate or module `alloc` --> /home/clang/.cargo/registry/src/index.crates.io-6f17d22bba15001f/frame-benchmarking-36.0.0/src/v1.rs:1738:2 | 1738 | / sp_runtime::format_runtime_string!( 1739 | | "\n* Pallet: {}\n\ 1740 | | * Benchmark: {}\n\ 1741 | | * Components: {:?}\n\ ... | 1750 | | error_message, 1751 | | ) | |_____^ use of undeclared crate or module `alloc` | = note: this error originates in the macro `sp_runtime::format_runtime_string` (in Nightly builds, run with -Z macro-backtrace for more info) For more information about this error, try `rustc --explain E0433`. 
``` This bug has been known already, but hasn't been fixed so far, see https://github.com/paritytech/polkadot-sdk/issues/5213 and https://substrate.stackexchange.com/questions/11786/use-of-undeclared-crate-or-module-alloc-when-upgrade-to-v1-13-0. I have made a mini Rust crate that can reproduce the bug, and it also shows that this PR will fix the issue: https://github.com/clangenb/sp-runtime-string-test. --- prdoc/pr_5632.prdoc | 13 +++++++++++++ substrate/primitives/runtime/src/lib.rs | 4 +--- substrate/primitives/runtime/src/runtime_string.rs | 2 +- 3 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_5632.prdoc diff --git a/prdoc/pr_5632.prdoc b/prdoc/pr_5632.prdoc new file mode 100644 index 000000000000..f76428bbc8f6 --- /dev/null +++ b/prdoc/pr_5632.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix `alloc` not found error in `format_runtime_string!` + +doc: + - audience: Runtime Dev + description: | + Fixes the macro hygiene in the `format_runtime_string!` macro to fix the `alloc` not found build error. + +crates: + - name: sp-runtime + bump: patch diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index ba1ea3769724..260c9a91855a 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -49,7 +49,7 @@ extern crate alloc; #[doc(hidden)] -pub use alloc::vec::Vec; +pub use alloc::{format, vec::Vec}; #[doc(hidden)] pub use codec; #[doc(hidden)] @@ -79,8 +79,6 @@ use sp_core::{ sr25519, }; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use alloc::format; use alloc::vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs index 71aacf07a762..bb0347badcbb 100644 --- a/substrate/primitives/runtime/src/runtime_string.rs +++ b/substrate/primitives/runtime/src/runtime_string.rs @@ -50,7 +50,7 @@ macro_rules! format_runtime_string { } #[cfg(not(feature = "std"))] { - sp_runtime::RuntimeString::Owned(alloc::format!($($args)*).as_bytes().to_vec()) + sp_runtime::RuntimeString::Owned($crate::format!($($args)*).as_bytes().to_vec()) } }}; } From 016421ac71574333da92a56ef7bcbef8621ccc14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Molina=20Colmenero?= Date: Sat, 7 Sep 2024 23:42:02 +0200 Subject: [PATCH 28/66] Add debugging info for `StorageWeightReclaim` (#5594) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When inspecting the logs we often encounter the following message: `Benchmarked storage weight smaller than consumed storage weight. benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}` However, it is very hard to guess which call is causing the issue. With the changes proposed in this PR, information about the call is provided, so that we can easily identify the source of the problem without further delay and work on a fix more efficiently.
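For illustration, a minimal sketch of the shape of the improved diagnostic (a hypothetical standalone function, not the pallet's actual code — the real change lives in `StorageWeightReclaim`'s post-dispatch logic and obtains the index via `frame_system::Pallet::<T>::extrinsic_index()`): ```rust
// Hedged sketch only: mirrors the format of the new log line with plain types.
fn report_weight_mismatch(extrinsic_index: u32, benchmarked: u64, consumed: u64) {
    if consumed > benchmarked {
        // The extrinsic index now appears in the message, so the offending call
        // can be located in the block without guessing.
        eprintln!(
            "Benchmarked storage weight smaller than consumed storage weight. \
             extrinsic: {extrinsic_index} benchmarked: {benchmarked} consumed: {consumed}"
        );
    }
}
```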
--------- Co-authored-by: Bastian Köcher --- .../primitives/storage-weight-reclaim/src/lib.rs | 6 ++++-- prdoc/pr_5594.prdoc | 13 +++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_5594.prdoc diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index a557e881e26b..2529297691e8 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -183,13 +183,15 @@ where if consumed_weight > benchmarked_weight { log::error!( target: LOG_TARGET, - "Benchmarked storage weight smaller than consumed storage weight. benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}" + "Benchmarked storage weight smaller than consumed storage weight. extrinsic: {} benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}", + frame_system::Pallet::::extrinsic_index().unwrap_or(0) ); current.accrue(Weight::from_parts(0, storage_size_diff), info.class) } else { log::trace!( target: LOG_TARGET, - "Reclaiming storage weight. benchmarked: {benchmarked_weight}, consumed: {consumed_weight} unspent: {unspent}" + "Reclaiming storage weight. extrinsic: {} benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}", + frame_system::Pallet::::extrinsic_index().unwrap_or(0) ); current.reduce(Weight::from_parts(0, storage_size_diff), info.class) } diff --git a/prdoc/pr_5594.prdoc b/prdoc/pr_5594.prdoc new file mode 100644 index 000000000000..dbdc7937b73d --- /dev/null +++ b/prdoc/pr_5594.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Add debugging info for `StorageWeightReclaim`" + +doc: + - audience: Runtime Dev + description: | + - Includes extrinsic index to be displayed in the logs when the consumed weight is higher than the measured one. 
+
+crates:
+  - name: cumulus-primitives-storage-weight-reclaim
+    bump: patch

From 868a36bd186f3ef9535ebf7deceac1b2fab19fcb Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Sun, 8 Sep 2024 23:14:51 +0200
Subject: [PATCH 29/66] [pallet-revive] update generic runtime types (#5608)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fix #5574

- Use U256 instead of BalanceOf and MomentOf in the Ext trait
- Enforce H256 for T::Hash

The Ext trait still depends on the associated type `T: Config`. We can look
into refactoring it further later, but even in its current state it should
not influence how data is encoded and decoded between the contract and the
host (generic parameters below are restored on a best-effort basis, as the
angle-bracketed text was lost in extraction):

```
fn caller(&self) -> Origin<Self::T>;
-> only used to extract the address of the caller

fn account_id(&self) -> &AccountIdOf<Self::T>;
-> only used to expose the address or access the account_id internally

fn gas_meter(&self) -> &GasMeter<Self::T>;
fn gas_meter_mut(&mut self) -> &mut GasMeter<Self::T>;
-> encoding does not depend on T

fn call_runtime(&self, call: <Self::T as Config>::RuntimeCall) -> DispatchResultWithPostInfo;
-> Substrate specific, just an opaque blob of bytes from the contract's perspective

fn contract_info(&mut self) -> &mut ContractInfo<Self::T>;
fn transient_storage(&mut self) -> &mut TransientStorage<Self::T>;
-> gated by #[cfg(any(test, feature = "runtime-benchmarks"))]
```

---------
Co-authored-by: Alexander Theißen
---
 prdoc/pr_5608.prdoc                                |  16 +
 substrate/bin/node/runtime/src/lib.rs              |   5 -
 substrate/frame/revive/Cargo.toml                  |   1 -
 substrate/frame/revive/build.rs                    |  78 ---
 .../revive/fixtures/contracts/balance.rs           |   9 +-
 .../frame/revive/fixtures/contracts/call.rs        |   8 +-
 .../fixtures/contracts/call_return_code.rs         |  10 +-
 .../contracts/call_runtime_and_call.rs             |   8 +-
 .../contracts/call_with_flags_and_value.rs         |  10 +-
 .../fixtures/contracts/call_with_limit.rs          |   4 +-
 .../fixtures/contracts/caller_contract.rs          |   4 +-
 .../contracts/chain_extension_temp_storage.rs      |   8 +-
 .../fixtures/contracts/common/src/lib.rs           |  27 +
 .../contracts/create_storage_and_call.rs           |   4 +-
 .../create_storage_and_instantiate.rs              |   6 +-
 .../create_transient_storage_and_call.rs           |   2 +-
 .../fixtures/contracts/delegate_call_lib.rs        |   7 +-
 .../contracts/destroy_and_transfer.rs              |   4 +-
 .../frame/revive/fixtures/contracts/drain.rs       |  11 +-
 .../contracts/event_and_return_on_deploy.rs        |   3 +-
 .../revive/fixtures/contracts/event_size.rs        |   3 +-
 .../contracts/instantiate_return_code.rs           |   6 +-
 .../fixtures/contracts/read_only_call.rs           |   8 +-
 .../revive/fixtures/contracts/recurse.rs           |   8 +-
 .../fixtures/contracts/self_destruct.rs            |   8 +-
 .../contracts/transfer_return_code.rs              |   4 +-
 .../revive/src/benchmarking/call_builder.rs        |  10 +-
 .../frame/revive/src/benchmarking/mod.rs           | 194 ++----
 substrate/frame/revive/src/exec.rs                 | 178 +++--
 substrate/frame/revive/src/lib.rs                  | 164 ++---
 substrate/frame/revive/src/migration.rs            | 650 ------------------
 substrate/frame/revive/src/primitives.rs           |   2 -
 substrate/frame/revive/src/storage.rs              |   8 +-
 substrate/frame/revive/src/storage/meter.rs        |  10 +-
 .../frame/revive/src/test_utils/builder.rs         |   3 +
 substrate/frame/revive/src/tests.rs                | 103 +--
 substrate/frame/revive/src/wasm/mod.rs             |  21 +-
 substrate/frame/revive/src/wasm/runtime.rs         | 170 ++---
 substrate/frame/revive/src/weights.rs              | 365 ----------
 substrate/frame/revive/uapi/src/host.rs            |  61 +-
 .../frame/revive/uapi/src/host/riscv32.rs          |  94 +--
 41 files changed, 507 insertions(+), 1788 deletions(-)
 create mode 100644 prdoc/pr_5608.prdoc
 delete mode 100644 substrate/frame/revive/build.rs
 delete mode 100644
substrate/frame/revive/src/migration.rs diff --git a/prdoc/pr_5608.prdoc b/prdoc/pr_5608.prdoc new file mode 100644 index 000000000000..9a0748e46bab --- /dev/null +++ b/prdoc/pr_5608.prdoc @@ -0,0 +1,16 @@ +title: "[pallet-revive] update runtime types" + +doc: + - audience: Runtime Dev + description: | + Refactor the Ext trait to use U256 instead of BalanceOf or MomentOf + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-uapi + bump: patch + - name: pallet-revive-fixtures + bump: patch + + diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 6ae04902aa82..caebd63408db 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1417,10 +1417,6 @@ impl pallet_revive::Config for Runtime { type UploadOrigin = EnsureSigned; type InstantiateOrigin = EnsureSigned; type RuntimeHoldReason = RuntimeHoldReason; - #[cfg(not(feature = "runtime-benchmarks"))] - type Migrations = (); - #[cfg(feature = "runtime-benchmarks")] - type Migrations = pallet_revive::migration::codegen::BenchMigrations; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Xcm = (); @@ -2592,7 +2588,6 @@ type Migrations = ( pallet_nomination_pools::migration::versioned::V6ToV7, pallet_alliance::migration::Migration, pallet_contracts::Migration, - pallet_revive::Migration, pallet_identity::migration::versioned::V0ToV1, ); diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 6b7542e89202..667328ac2d0d 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -3,7 +3,6 @@ name = "pallet-revive" version = "0.1.0" authors.workspace = true edition.workspace = true -build = "build.rs" license = "Apache-2.0" homepage.workspace = true repository.workspace = true diff --git a/substrate/frame/revive/build.rs b/substrate/frame/revive/build.rs deleted file mode 100644 index ca8e62df6047..000000000000 --- a/substrate/frame/revive/build.rs +++ /dev/null @@ -1,78 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::io::Write; - -/// We start with version 2 instead of 0 when adding the pallet. -/// -/// Because otherwise we can't test any migrations since they require the storage version -/// to be lower than the pallet version in order to be triggerd. With the pallet version -/// at the minimum (0) this would not work. -const LOWEST_STORAGE_VERSION: u16 = 2; - -/// Get the latest migration version. -/// -/// Find the highest version number from the available migration files. -/// Each migration file should follow the naming convention `vXX.rs`, where `XX` is the version -/// number. 
-fn get_latest_version() -> u16 { - let Ok(dir) = std::fs::read_dir("src/migration") else { return LOWEST_STORAGE_VERSION }; - dir.filter_map(|entry| { - let file_name = entry.as_ref().ok()?.file_name(); - let file_name = file_name.to_str()?; - if file_name.starts_with('v') && file_name.ends_with(".rs") { - let version = &file_name[1..&file_name.len() - 3]; - let version = version.parse::().ok()?; - - // Ensure that the version matches the one defined in the file. - let path = entry.unwrap().path(); - let file_content = std::fs::read_to_string(&path).ok()?; - assert!( - file_content.contains(&format!("const VERSION: u16 = {}", version)), - "Invalid MigrationStep::VERSION in {:?}", - path - ); - - return Some(version) - } - None - }) - .max() - .unwrap_or(LOWEST_STORAGE_VERSION) -} - -/// Generates a module that exposes the latest migration version, and the benchmark migrations type. -fn main() -> Result<(), Box> { - let out_dir = std::env::var("OUT_DIR")?; - let path = std::path::Path::new(&out_dir).join("migration_codegen.rs"); - let mut f = std::fs::File::create(path)?; - let version = get_latest_version(); - write!( - f, - " - pub mod codegen {{ - use crate::NoopMigration; - /// The latest migration version, pulled from the latest migration file. - pub const LATEST_MIGRATION_VERSION: u16 = {version}; - /// The Migration Steps used for benchmarking the migration framework. - pub type BenchMigrations = (NoopMigration<{}>, NoopMigration<{version}>); - }}", - version - 1, - )?; - - Ok(()) -} diff --git a/substrate/frame/revive/fixtures/contracts/balance.rs b/substrate/frame/revive/fixtures/contracts/balance.rs index 4011b8379cbf..4606135d9807 100644 --- a/substrate/frame/revive/fixtures/contracts/balance.rs +++ b/substrate/frame/revive/fixtures/contracts/balance.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::output; +use common::u64_output; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -28,9 +28,6 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - // Initialize buffer with 1s so that we can check that it is overwritten. - output!(balance, [1u8; 8], api::balance,); - - // Assert that the balance is 0. - assert_eq!(&[0u8; 8], balance); + let balance = u64_output!(api::balance,); + assert_eq!(balance, 0); } diff --git a/substrate/frame/revive/fixtures/contracts/call.rs b/substrate/frame/revive/fixtures/contracts/call.rs index 93687441fa50..ee51548879d9 100644 --- a/substrate/frame/revive/fixtures/contracts/call.rs +++ b/substrate/frame/revive/fixtures/contracts/call.rs @@ -38,10 +38,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. 
callee_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/call_return_code.rs b/substrate/frame/revive/fixtures/contracts/call_return_code.rs index 29b77c343fe9..25370459acb4 100644 --- a/substrate/frame/revive/fixtures/contracts/call_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/call_return_code.rs @@ -21,7 +21,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -41,10 +41,10 @@ pub extern "C" fn call() { let err_code = match api::call( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &100u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &u256_bytes(100u64), // Value transferred to the contract. input, None, ) { diff --git a/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs b/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs index 7cd46849655f..8c8aee962849 100644 --- a/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs @@ -42,10 +42,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs b/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs index c3204c29281c..330393e706e9 100644 --- a/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs +++ b/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs @@ -19,7 +19,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -40,10 +40,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::from_bits(flags).unwrap(), callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &value.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &u256_bytes(value), // Value transferred to the contract. forwarded_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/call_with_limit.rs b/substrate/frame/revive/fixtures/contracts/call_with_limit.rs index a941aa9a3421..6ab892a6b7ae 100644 --- a/substrate/frame/revive/fixtures/contracts/call_with_limit.rs +++ b/substrate/frame/revive/fixtures/contracts/call_with_limit.rs @@ -43,8 +43,8 @@ pub extern "C" fn call() { callee_addr, ref_time, proof_size, - None, // No deposit limit. - &0u64.to_le_bytes(), // value transferred to the contract. 
+ None, // No deposit limit. + &[0u8; 32], // value transferred to the contract. forwarded_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/caller_contract.rs b/substrate/frame/revive/fixtures/contracts/caller_contract.rs index 3b83f208d623..eb29fca87c15 100644 --- a/substrate/frame/revive/fixtures/contracts/caller_contract.rs +++ b/substrate/frame/revive/fixtures/contracts/caller_contract.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api, ReturnErrorCode}; #[no_mangle] @@ -32,7 +32,7 @@ pub extern "C" fn call() { // The value to transfer on instantiation and calls. Chosen to be greater than existential // deposit. - let value = 32768u64.to_le_bytes(); + let value = u256_bytes(32768u64); let salt = [0u8; 32]; // Callee will use the first 4 bytes of the input to return an exit status. diff --git a/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs b/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs index bb5c1ccbc1d6..22d6c5b548d8 100644 --- a/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs +++ b/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs @@ -54,10 +54,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::ALLOW_REENTRY, &addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs index 947247e9cf74..abfba282bec1 100644 --- a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs +++ b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs @@ -167,3 +167,30 @@ macro_rules! unwrap_output { $host_fn($($arg,)* $output).unwrap(); }; } + +/// Call the host function and convert the [u8; 32] output to u64. +#[macro_export] +macro_rules! u64_output { + ($host_fn:path, $($arg:expr),*) => {{ + let mut buffer = [1u8; 32]; + $host_fn($($arg,)* &mut buffer); + assert!(buffer[8..].iter().all(|&x| x == 0)); + u64::from_le_bytes(buffer[..8].try_into().unwrap()) + }}; +} + +/// Convert a u64 into a [u8; 32]. +pub const fn u256_bytes(value: u64) -> [u8; 32] { + let mut buffer = [0u8; 32]; + let bytes = value.to_le_bytes(); + + buffer[0] = bytes[0]; + buffer[1] = bytes[1]; + buffer[2] = bytes[2]; + buffer[3] = bytes[3]; + buffer[4] = bytes[4]; + buffer[5] = bytes[5]; + buffer[6] = bytes[6]; + buffer[7] = bytes[7]; + buffer +} diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs index 28d161791e5b..4fa2db0c8c1c 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs @@ -33,7 +33,7 @@ pub extern "C" fn call() { buffer, input: [u8; 4], callee: &[u8; 20], - deposit_limit: [u8; 8], + deposit_limit: &[u8; 32], ); // create 4 byte of storage before calling @@ -46,7 +46,7 @@ pub extern "C" fn call() { 0u64, // How much ref_time weight to devote for the execution. 
0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. Some(deposit_limit), - &0u64.to_le_bytes(), // Value transferred to the contract. + &[0u8; 32], // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs index d87c2e8cd35a..e1372e2eb8b6 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs @@ -19,7 +19,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -32,10 +32,10 @@ pub extern "C" fn call() { input!( input: [u8; 4], code_hash: &[u8; 32], - deposit_limit: [u8; 8], + deposit_limit: &[u8; 32], ); - let value = 10_000u64.to_le_bytes(); + let value = u256_bytes(10_000u64); let salt = [0u8; 32]; let mut address = [0u8; 20]; diff --git a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs index 753490cf26b7..d2efb26e5ceb 100644 --- a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs @@ -52,7 +52,7 @@ pub extern "C" fn call() { 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. None, - &0u64.to_le_bytes(), // Value transferred to the contract. + &[0u8; 32], // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs index c5525423a9ee..95c1bd2aa6cd 100644 --- a/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs +++ b/substrate/frame/revive/fixtures/contracts/delegate_call_lib.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::output; +use common::u64_output; use uapi::{HostFn, HostFnImpl as api, StorageFlags}; #[no_mangle] @@ -39,9 +39,8 @@ pub extern "C" fn call() { // Assert that `value_transferred` is equal to the value // passed to the `caller` contract: 1337. - output!(value_transferred, [0u8; 8], api::value_transferred,); - let value_transferred = u64::from_le_bytes(value_transferred[..].try_into().unwrap()); - assert_eq!(value_transferred, 1337); + let value = u64_output!(api::value_transferred,); + assert_eq!(value, 1337); // Assert that ALICE is the caller of the contract. 
let mut caller = [0u8; 20]; diff --git a/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs b/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs index 4959a5e2e0ce..d381db8e398f 100644 --- a/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs +++ b/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs @@ -18,11 +18,11 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api, StorageFlags}; const ADDRESS_KEY: [u8; 32] = [0u8; 32]; -const VALUE: [u8; 8] = [0, 0, 1u8, 0, 0, 0, 0, 0]; +const VALUE: [u8; 32] = u256_bytes(65536); #[no_mangle] #[polkavm_derive::polkavm_export] diff --git a/substrate/frame/revive/fixtures/contracts/drain.rs b/substrate/frame/revive/fixtures/contracts/drain.rs index b46d4f7c8418..0d644a4238c4 100644 --- a/substrate/frame/revive/fixtures/contracts/drain.rs +++ b/substrate/frame/revive/fixtures/contracts/drain.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::output; +use common::{u256_bytes, u64_output}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -28,17 +28,14 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - output!(balance, [0u8; 8], api::balance,); - let balance = u64::from_le_bytes(balance[..].try_into().unwrap()); - - output!(minimum_balance, [0u8; 8], api::minimum_balance,); - let minimum_balance = u64::from_le_bytes(minimum_balance[..].try_into().unwrap()); + let balance = u64_output!(api::balance,); + let minimum_balance = u64_output!(api::minimum_balance,); // Make the transferred value exceed the balance by adding the minimum balance. let balance = balance + minimum_balance; // Try to self-destruct by sending more balance to the 0 address. // The call will fail because a contract transfer has a keep alive requirement. 
- let res = api::transfer(&[0u8; 20], &balance.to_le_bytes()); + let res = api::transfer(&[0u8; 20], &u256_bytes(balance)); assert!(matches!(res, Err(uapi::ReturnErrorCode::TransferFailed))); } diff --git a/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs b/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs index 9186835d2911..5c438c1a75a1 100644 --- a/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs +++ b/substrate/frame/revive/fixtures/contracts/event_and_return_on_deploy.rs @@ -25,7 +25,8 @@ use uapi::{HostFn, HostFnImpl as api}; #[polkavm_derive::polkavm_export] pub extern "C" fn deploy() { let buffer = [1u8, 2, 3, 4]; - api::deposit_event(&[0u8; 0], &buffer); + let topics = [[42u8; 32]; 1]; + api::deposit_event(&topics, &buffer); api::return_value(uapi::ReturnFlags::empty(), &buffer); } diff --git a/substrate/frame/revive/fixtures/contracts/event_size.rs b/substrate/frame/revive/fixtures/contracts/event_size.rs index 2b56de4bd3fd..7f04ae42765a 100644 --- a/substrate/frame/revive/fixtures/contracts/event_size.rs +++ b/substrate/frame/revive/fixtures/contracts/event_size.rs @@ -33,6 +33,7 @@ pub extern "C" fn call() { input!(len: u32,); let data = &BUFFER[..len as usize]; + let topics = [[0u8; 32]; 0]; - api::deposit_event(&[0u8; 0], data); + api::deposit_event(&topics, data); } diff --git a/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs b/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs index a81ffea943d4..c5736850960a 100644 --- a/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -36,8 +36,8 @@ pub extern "C" fn call() { 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, /* How much proof_size weight to devote for the execution. 0 = * all. */ - None, // No deposit limit. - &10_000u64.to_le_bytes(), // Value to transfer. + None, // No deposit limit. + &u256_bytes(10_000u64), // Value to transfer. input, None, None, diff --git a/substrate/frame/revive/fixtures/contracts/read_only_call.rs b/substrate/frame/revive/fixtures/contracts/read_only_call.rs index 7476b7a8366d..ea74d56867f5 100644 --- a/substrate/frame/revive/fixtures/contracts/read_only_call.rs +++ b/substrate/frame/revive/fixtures/contracts/read_only_call.rs @@ -39,10 +39,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::READ_ONLY, callee_addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/revive/fixtures/contracts/recurse.rs b/substrate/frame/revive/fixtures/contracts/recurse.rs index c15784b7f245..2e70d67d8c73 100644 --- a/substrate/frame/revive/fixtures/contracts/recurse.rs +++ b/substrate/frame/revive/fixtures/contracts/recurse.rs @@ -43,10 +43,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::ALLOW_REENTRY, &addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. 
- 0u64, // How much deposit_limit to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much deposit_limit to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value transferred to the contract. &(calls_left - 1).to_le_bytes(), None, ) diff --git a/substrate/frame/revive/fixtures/contracts/self_destruct.rs b/substrate/frame/revive/fixtures/contracts/self_destruct.rs index 0e1e4d30e6f3..524979991ec7 100644 --- a/substrate/frame/revive/fixtures/contracts/self_destruct.rs +++ b/substrate/frame/revive/fixtures/contracts/self_destruct.rs @@ -42,10 +42,10 @@ pub extern "C" fn call() { api::call( uapi::CallFlags::ALLOW_REENTRY, &addr, - 0u64, // How much ref_time to devote for the execution. 0 = all. - 0u64, // How much proof_size to devote for the execution. 0 = all. - None, // No deposit limit. - &0u64.to_le_bytes(), // Value to transfer. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &[0u8; 32], // Value to transfer. &[0u8; 0], None, ) diff --git a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs index 3e1f2757c27a..bfeca9b8b4a4 100644 --- a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs @@ -18,7 +18,7 @@ #![no_std] #![no_main] -extern crate common; +use common::u256_bytes; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -28,7 +28,7 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - let ret_code = match api::transfer(&[0u8; 20], &100u64.to_le_bytes()) { + let ret_code = match api::transfer(&[0u8; 20], &u256_bytes(100u64)) { Ok(_) => 0u32, Err(code) => code as u32, }; diff --git a/substrate/frame/revive/src/benchmarking/call_builder.rs b/substrate/frame/revive/src/benchmarking/call_builder.rs index c000817a8a39..020a578c3a3a 100644 --- a/substrate/frame/revive/src/benchmarking/call_builder.rs +++ b/substrate/frame/revive/src/benchmarking/call_builder.rs @@ -22,12 +22,14 @@ use crate::{ storage::meter::Meter, transient_storage::MeterEntry, wasm::{ApiVersion, PreparedCall, Runtime}, - BalanceOf, Config, DebugBuffer, Error, GasMeter, Origin, TypeInfo, WasmBlob, Weight, + BalanceOf, Config, DebugBuffer, Error, GasMeter, MomentOf, Origin, TypeInfo, WasmBlob, Weight, }; use alloc::{vec, vec::Vec}; use codec::{Encode, HasCompact}; use core::fmt::Debug; use frame_benchmarking::benchmarking; +use frame_support::traits::IsType; +use sp_core::{H256, U256}; type StackExt<'a, T> = Stack<'a, T, WasmBlob>; @@ -48,6 +50,9 @@ impl Default for CallSetup where T: Config + pallet_balances::Config, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + T::Hash: IsType, + MomentOf: Into, { fn default() -> Self { Self::new(WasmModule::dummy()) @@ -57,7 +62,10 @@ where impl CallSetup where T: Config + pallet_balances::Config, + T::Hash: IsType, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + MomentOf: Into, { /// Setup a new call for the given module. 
pub fn new(module: WasmModule) -> Self { diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index 2c5285622843..8cdd7da5db9d 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -25,7 +25,6 @@ use self::{call_builder::CallSetup, code::WasmModule}; use crate::{ exec::{Key, MomentOf}, limits, - migration::codegen::LATEST_MIGRATION_VERSION, storage::WriteOutcome, Pallet as Contracts, *, }; @@ -34,7 +33,6 @@ use codec::{Encode, MaxEncodedLen}; use frame_benchmarking::v2::*; use frame_support::{ self, assert_ok, - pallet_prelude::StorageVersion, storage::child, traits::{fungible::InspectHold, Currency}, weights::{Weight, WeightMeter}, @@ -65,14 +63,21 @@ const UNBALANCED_TRIE_LAYERS: u32 = 20; struct Contract { caller: T::AccountId, account_id: T::AccountId, - addr: T::AccountId, } impl Contract where T: Config + pallet_balances::Config, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + MomentOf: Into, + T::Hash: IsType, { + /// Returns the address of the contract. + fn address(&self) -> H160 { + T::AddressMapper::to_address(&self.account_id) + } + /// Create new contract and use a default account id as instantiator. fn new(module: WasmModule, data: Vec) -> Result, &'static str> { Self::with_index(0, module, data) @@ -110,7 +115,7 @@ where let address = outcome.result?.addr; let account_id = T::AddressMapper::to_account_id_contract(&address); - let result = Contract { caller, account_id: account_id.clone(), addr: account_id }; + let result = Contract { caller, account_id: account_id.clone() }; ContractInfoOf::::insert(&address, result.info()?); @@ -216,9 +221,12 @@ fn default_deposit_limit() -> BalanceOf { #[benchmarks( where - as codec::HasCompact>::Type: Clone + Eq + PartialEq + core::fmt::Debug + scale_info::TypeInfo + codec::Encode, + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: From< as Currency>::Balance> + Into + TryFrom, T: Config + pallet_balances::Config, - BalanceOf: From< as Currency>::Balance>, + MomentOf: Into, + ::RuntimeEvent: From>, + T::Hash: IsType, as Currency>::Balance: From>, )] mod benchmarks { @@ -246,73 +254,6 @@ mod benchmarks { Ok(()) } - // This benchmarks the weight of executing Migration::migrate to execute a noop migration. - #[benchmark(pov_mode = Measured)] - fn migration_noop() { - let version = LATEST_MIGRATION_VERSION; - StorageVersion::new(version).put::>(); - #[block] - { - Migration::::migrate(&mut WeightMeter::new()); - } - assert_eq!(StorageVersion::get::>(), version); - } - - // This benchmarks the weight of dispatching migrate to execute 1 `NoopMigration` - #[benchmark(pov_mode = Measured)] - fn migrate() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version - 2).put::>(); - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - - #[extrinsic_call] - _(RawOrigin::Signed(whitelisted_caller()), Weight::MAX); - - assert_eq!(StorageVersion::get::>(), latest_version - 1); - } - - // This benchmarks the weight of running on_runtime_upgrade when there are no migration in - // progress. 
- #[benchmark(pov_mode = Measured)] - fn on_runtime_upgrade_noop() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version).put::>(); - #[block] - { - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - } - assert!(MigrationInProgress::::get().is_none()); - } - - // This benchmarks the weight of running on_runtime_upgrade when there is a migration in - // progress. - #[benchmark(pov_mode = Measured)] - fn on_runtime_upgrade_in_progress() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version - 2).put::>(); - let v = vec![42u8].try_into().ok(); - MigrationInProgress::::set(v.clone()); - #[block] - { - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - } - assert!(MigrationInProgress::::get().is_some()); - assert_eq!(MigrationInProgress::::get(), v); - } - - // This benchmarks the weight of running on_runtime_upgrade when there is a migration to - // process. - #[benchmark(pov_mode = Measured)] - fn on_runtime_upgrade() { - let latest_version = LATEST_MIGRATION_VERSION; - StorageVersion::new(latest_version - 2).put::>(); - #[block] - { - as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade(); - } - assert!(MigrationInProgress::::get().is_some()); - } - // This benchmarks the overhead of loading a code of size `c` byte from storage and into // the execution engine. This does **not** include the actual execution for which the gas meter // is responsible. This is achieved by generating all code to the `deploy` function @@ -326,7 +267,7 @@ mod benchmarks { let instance = Contract::::with_caller(whitelisted_caller(), WasmModule::sized(c), vec![])?; let value = Pallet::::min_balance(); - let callee = T::AddressMapper::to_address(&instance.addr); + let callee = T::AddressMapper::to_address(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] @@ -434,7 +375,7 @@ mod benchmarks { Contract::::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?; let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); - let callee = T::AddressMapper::to_address(&instance.addr); + let callee = T::AddressMapper::to_address(&instance.account_id); let before = T::Currency::balance(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] @@ -510,7 +451,7 @@ mod benchmarks { let storage_deposit = default_deposit_limit::(); let hash = >::bare_upload_code(origin.into(), code, storage_deposit)?.code_hash; - let callee = T::AddressMapper::to_address(&instance.addr); + let callee = T::AddressMapper::to_address(&instance.account_id); assert_ne!(instance.info()?.code_hash, hash); #[extrinsic_call] _(RawOrigin::Root, callee, hash); @@ -661,85 +602,67 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn seal_balance() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_balance(memory.as_mut_slice(), 4, 0); + result = runtime.bench_balance(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - ::decode(&mut &memory[4..]).unwrap(), - runtime.ext().balance().into() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().balance()); } #[benchmark(pov_mode = Measured)] fn seal_value_transferred() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + 
build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_value_transferred(memory.as_mut_slice(), 4, 0); + result = runtime.bench_value_transferred(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - ::decode(&mut &memory[4..]).unwrap(), - runtime.ext().value_transferred().into() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().value_transferred()); } #[benchmark(pov_mode = Measured)] fn seal_minimum_balance() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_minimum_balance(memory.as_mut_slice(), 4, 0); + result = runtime.bench_minimum_balance(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - ::decode(&mut &memory[4..]).unwrap(), - runtime.ext().minimum_balance().into() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().minimum_balance()); } #[benchmark(pov_mode = Measured)] fn seal_block_number() { - let len = as MaxEncodedLen>::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_block_number(memory.as_mut_slice(), 4, 0); + result = runtime.bench_block_number(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!( - >::decode(&mut &memory[4..]).unwrap(), - runtime.ext().block_number() - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().block_number()); } #[benchmark(pov_mode = Measured)] fn seal_now() { - let len = as MaxEncodedLen>::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let result; #[block] { - result = runtime.bench_now(memory.as_mut_slice(), 4, 0); + result = runtime.bench_now(memory.as_mut_slice(), 0); } assert_ok!(result); - assert_eq!(>::decode(&mut &memory[4..]).unwrap(), *runtime.ext().now()); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().now()); } #[benchmark(pov_mode = Measured)] fn seal_weight_to_fee() { - let len = ::max_encoded_len() as u32; - build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + build_runtime!(runtime, memory: [[0u8;32], ]); let weight = Weight::from_parts(500_000, 300_000); let result; #[block] @@ -748,15 +671,11 @@ mod benchmarks { memory.as_mut_slice(), weight.ref_time(), weight.proof_size(), - 4, 0, ); } assert_ok!(result); - assert_eq!( - >::decode(&mut &memory[4..]).unwrap(), - runtime.ext().get_weight_price(weight) - ); + assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().get_weight_price(weight)); } #[benchmark(pov_mode = Measured)] @@ -828,28 +747,37 @@ mod benchmarks { t: Linear<0, { limits::NUM_EVENT_TOPICS as u32 }>, n: Linear<0, { limits::PAYLOAD_BYTES }>, ) { - let topics = (0..t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode(); - let topics_len = topics.len() as u32; - - build_runtime!(runtime, memory: [ - n.to_le_bytes(), - topics, - vec![0u8; n as _], - ]); + let num_topic = t as u32; + let topics = (0..t).map(|i| H256::repeat_byte(i as u8)).collect::>(); + let topics_data = + topics.iter().flat_map(|hash| hash.as_bytes().to_vec()).collect::>(); + let data = vec![42u8; n as _]; + build_runtime!(runtime, instance, memory: [ topics_data, data, ]); let result; #[block] { result = runtime.bench_deposit_event( 
memory.as_mut_slice(), - 4, // topics_ptr - topics_len, // topics_len - 4 + topics_len, // data_ptr - 0, // data_len + 0, // topics_ptr + num_topic, + topics_data.len() as u32, // data_ptr + n, // data_len ); } - assert_ok!(result); + + let events = System::::events(); + let record = &events[events.len() - 1]; + + assert_eq!( + record.event, + crate::Event::ContractEmitted { contract: instance.address(), data }.into(), + ); + assert_eq!( + record.topics.iter().map(|t| H256::from_slice(t.as_ref())).collect::>(), + topics, + ); } // Benchmark debug_message call @@ -1435,7 +1363,7 @@ mod benchmarks { let account_bytes = account.encode(); let account_len = account_bytes.len() as u32; - let value_bytes = value.encode(); + let value_bytes = Into::::into(value).encode(); let mut memory = memory!(account_bytes, value_bytes,); let result; @@ -1461,10 +1389,10 @@ mod benchmarks { let callee_len = callee_bytes.len() as u32; let value: BalanceOf = t.into(); - let value_bytes = value.encode(); + let value_bytes = Into::::into(value).encode(); let deposit: BalanceOf = (u32::MAX - 100).into(); - let deposit_bytes = deposit.encode(); + let deposit_bytes = Into::::into(deposit).encode(); let deposit_len = deposit_bytes.len() as u32; let mut setup = CallSetup::::default(); @@ -1536,11 +1464,11 @@ mod benchmarks { let hash_len = hash_bytes.len() as u32; let value: BalanceOf = 1u32.into(); - let value_bytes = value.encode(); + let value_bytes = Into::::into(value).encode(); let value_len = value_bytes.len() as u32; let deposit: BalanceOf = 0u32.into(); - let deposit_bytes = deposit.encode(); + let deposit_bytes = Into::::into(deposit).encode(); let deposit_len = deposit_bytes.len() as u32; let mut setup = CallSetup::::default(); diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 649479f7790f..016bdec37afd 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -37,7 +37,7 @@ use frame_support::{ traits::{ fungible::{Inspect, Mutate}, tokens::{Fortitude, Preservation}, - Contains, OriginTrait, Time, + Contains, IsType, OriginTrait, Time, }, weights::Weight, Blake2_128Concat, BoundedVec, StorageHasher, @@ -49,7 +49,7 @@ use frame_system::{ use sp_core::{ ecdsa::Public as ECDSAPublic, sr25519::{Public as SR25519Public, Signature as SR25519Signature}, - ConstU32, Get, H160, H256, + ConstU32, Get, H160, H256, U256, }; use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; use sp_runtime::{ @@ -61,9 +61,6 @@ pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; pub type ExecResult = Result; -/// A type that represents a topic of an event. At the moment a hash is used. -pub type TopicOf = ::Hash; - /// Type for variable sized storage key. Used for transparent hashing. 
type VarSizedKey = BoundedVec>; @@ -184,9 +181,9 @@ pub trait Ext: sealing::Sealed { fn call( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, to: &H160, - value: BalanceOf, + value: U256, input_data: Vec, allows_reentry: bool, read_only: bool, @@ -209,9 +206,9 @@ pub trait Ext: sealing::Sealed { fn instantiate( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, code: H256, - value: BalanceOf, + value: U256, input_data: Vec, salt: Option<&[u8; 32]>, ) -> Result<(H160, ExecReturnValue), ExecError>; @@ -226,7 +223,7 @@ pub trait Ext: sealing::Sealed { fn terminate(&mut self, beneficiary: &H160) -> DispatchResult; /// Transfer some amount of funds into the specified account. - fn transfer(&mut self, to: &H160, value: BalanceOf) -> DispatchResult; + fn transfer(&mut self, to: &H160, value: U256) -> DispatchResult; /// Returns the storage entry of the executing account by the given `key`. /// @@ -304,30 +301,30 @@ pub trait Ext: sealing::Sealed { /// Returns the balance of the current contract. /// /// The `value_transferred` is already added. - fn balance(&self) -> BalanceOf; + fn balance(&self) -> U256; /// Returns the value transferred along with this call. - fn value_transferred(&self) -> BalanceOf; + fn value_transferred(&self) -> U256; - /// Returns a reference to the timestamp of the current block - fn now(&self) -> &MomentOf; + /// Returns the timestamp of the current block + fn now(&self) -> U256; /// Returns the minimum balance that is required for creating an account. - fn minimum_balance(&self) -> BalanceOf; + fn minimum_balance(&self) -> U256; /// Deposit an event with the given topics. /// /// There should not be any duplicates in `topics`. - fn deposit_event(&mut self, topics: Vec>, data: Vec); + fn deposit_event(&mut self, topics: Vec, data: Vec); /// Returns the current block number. - fn block_number(&self) -> BlockNumberFor; + fn block_number(&self) -> U256; /// Returns the maximum allowed size of a storage item. fn max_value_size(&self) -> u32; /// Returns the price for the specified amount of weight. - fn get_weight_price(&self, weight: Weight) -> BalanceOf; + fn get_weight_price(&self, weight: Weight) -> U256; /// Get an immutable reference to the nested gas meter. fn gas_meter(&self) -> &GasMeter; @@ -697,6 +694,9 @@ impl CachedContract { impl<'a, T, E> Stack<'a, T, E> where T: Config, + T::Hash: IsType, + BalanceOf: Into + TryFrom, + MomentOf: Into, E: Executable, { /// Create and run a new call stack by calling into `dest`. @@ -1239,16 +1239,19 @@ where impl<'a, T, E> Ext for Stack<'a, T, E> where T: Config, + T::Hash: IsType, E: Executable, + BalanceOf: Into + TryFrom, + MomentOf: Into, { type T = T; fn call( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, dest: &H160, - value: BalanceOf, + value: U256, input_data: Vec, allows_reentry: bool, read_only: bool, @@ -1277,9 +1280,9 @@ where }); let executable = self.push_frame( FrameArgs::Call { dest, cached_info, delegated_call: None }, - value, + value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, gas_limit, - deposit_limit, + deposit_limit.try_into().map_err(|_| Error::::BalanceConversionFailed)?, // Enable read-only access if requested; cannot disable it if already set. 
read_only || self.is_read_only(), )?; @@ -1322,9 +1325,9 @@ where fn instantiate( &mut self, gas_limit: Weight, - deposit_limit: BalanceOf, + deposit_limit: U256, code_hash: H256, - value: BalanceOf, + value: U256, input_data: Vec, salt: Option<&[u8; 32]>, ) -> Result<(H160, ExecReturnValue), ExecError> { @@ -1337,9 +1340,9 @@ where salt, input_data: input_data.as_ref(), }, - value, + value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, gas_limit, - deposit_limit, + deposit_limit.try_into().map_err(|_| Error::::BalanceConversionFailed)?, self.is_read_only(), )?; let address = T::AddressMapper::to_address(&self.top_frame().account_id); @@ -1374,12 +1377,12 @@ where Ok(()) } - fn transfer(&mut self, to: &H160, value: BalanceOf) -> DispatchResult { + fn transfer(&mut self, to: &H160, value: U256) -> DispatchResult { Self::transfer( Preservation::Preserve, &self.top_frame().account_id, &T::AddressMapper::to_account_id(to), - value, + value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, ) } @@ -1462,27 +1465,28 @@ where self.caller_is_origin() && self.origin == Origin::Root } - fn balance(&self) -> BalanceOf { + fn balance(&self) -> U256 { T::Currency::reducible_balance( &self.top_frame().account_id, Preservation::Preserve, Fortitude::Polite, ) + .into() } - fn value_transferred(&self) -> BalanceOf { - self.top_frame().value_transferred + fn value_transferred(&self) -> U256 { + self.top_frame().value_transferred.into() } - fn now(&self) -> &MomentOf { - &self.timestamp + fn now(&self) -> U256 { + self.timestamp.into() } - fn minimum_balance(&self) -> BalanceOf { - T::Currency::minimum_balance() + fn minimum_balance(&self) -> U256 { + T::Currency::minimum_balance().into() } - fn deposit_event(&mut self, topics: Vec, data: Vec) { + fn deposit_event(&mut self, topics: Vec, data: Vec) { Contracts::::deposit_indexed_event( topics, Event::ContractEmitted { @@ -1492,16 +1496,16 @@ where ); } - fn block_number(&self) -> BlockNumberFor { - self.block_number + fn block_number(&self) -> U256 { + self.block_number.into() } fn max_value_size(&self) -> u32 { limits::PAYLOAD_BYTES } - fn get_weight_price(&self, weight: Weight) -> BalanceOf { - T::WeightPrice::convert(weight) + fn get_weight_price(&self, weight: Weight) -> U256 { + T::WeightPrice::convert(weight).into() } fn gas_meter(&self) -> &GasMeter { @@ -1864,7 +1868,7 @@ mod tests { let value = 55; let success_ch = MockLoader::insert(Call, move |ctx, _| { - assert_eq!(ctx.ext.value_transferred(), value); + assert_eq!(ctx.ext.value_transferred(), U256::from(value)); Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); @@ -1896,12 +1900,12 @@ mod tests { let value = 35; let success_ch = MockLoader::insert(Call, move |ctx, _| { - assert_eq!(ctx.ext.value_transferred(), value); + assert_eq!(ctx.ext.value_transferred(), U256::from(value)); Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); let delegate_ch = MockLoader::insert(Call, move |ctx, _| { - assert_eq!(ctx.ext.value_transferred(), value); + assert_eq!(ctx.ext.value_transferred(), U256::from(value)); let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); @@ -2112,9 +2116,9 @@ mod tests { // Try to call into yourself. 
let r = ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &BOB_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -2175,9 +2179,9 @@ mod tests { assert_matches!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -2316,9 +2320,9 @@ mod tests { // BOB calls CHARLIE ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -2410,9 +2414,9 @@ mod tests { // BOB calls CHARLIE. ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -2448,9 +2452,9 @@ mod tests { assert_matches!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -2621,9 +2625,9 @@ mod tests { .ext .instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), dummy_ch, - ::Currency::minimum_balance(), + ::Currency::minimum_balance().into(), vec![], Some(&[48; 32]), ) @@ -2699,9 +2703,9 @@ mod tests { assert_matches!( ctx.ext.instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), dummy_ch, - ::Currency::minimum_balance(), + ::Currency::minimum_balance().into(), vec![], Some(&[0; 32]), ), @@ -2804,9 +2808,9 @@ mod tests { assert_eq!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -2820,15 +2824,7 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { assert!(ctx .ext - .call( - Weight::zero(), - BalanceOf::::zero(), - &BOB_ADDR, - 0, - vec![99], - true, - false - ) + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false) .is_ok()); exec_trapped() }); @@ -2860,7 +2856,7 @@ mod tests { let addr = ::AddressMapper::to_address(&account_id); assert_matches!( - ctx.ext.call(Weight::zero(), BalanceOf::::zero(), &addr, 0, vec![], + ctx.ext.call(Weight::zero(), U256::zero(), &addr, U256::zero(), vec![], true, false), Err(ExecError{error, ..}) if error == >::ContractNotFound.into() ); exec_success() @@ -2998,7 +2994,7 @@ mod tests { let code_bob = MockLoader::insert(Call, |ctx, _| { let dest = H160::from_slice(ctx.input_data.as_ref()); ctx.ext - .call(Weight::zero(), BalanceOf::::zero(), &dest, 0, vec![], false, false) + .call(Weight::zero(), U256::zero(), &dest, U256::zero(), vec![], false, false) }); let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); @@ -3043,9 +3039,9 @@ mod tests { if ctx.input_data[0] == 0 { ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], false, false, @@ -3059,9 +3055,9 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &BOB_ADDR, - 0, + U256::zero(), vec![1], true, false, @@ -3251,7 +3247,7 @@ mod tests { ctx.ext .instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), fail_code, ctx.ext.minimum_balance() * 100, vec![], @@ -3268,7 +3264,7 @@ mod tests { .ext .instantiate( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), success_code, ctx.ext.minimum_balance() * 100, vec![], @@ -3284,7 +3280,7 @@ mod tests { // a plain call should not influence the account counter ctx.ext - .call(Weight::zero(), BalanceOf::::zero(), &addr, 0, vec![], false, false) + .call(Weight::zero(), U256::zero(), &addr, U256::zero(), vec![], false, false) .unwrap(); assert_eq!(System::account_nonce(ALICE), 
alice_nonce); @@ -3822,9 +3818,9 @@ mod tests { assert_eq!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false, @@ -3849,15 +3845,7 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { assert!(ctx .ext - .call( - Weight::zero(), - BalanceOf::::zero(), - &BOB_ADDR, - 0, - vec![99], - true, - false - ) + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false) .is_ok()); // CHARLIE can not read BOB`s storage. assert_eq!(ctx.ext.get_transient_storage(storage_key_1), None); @@ -3934,9 +3922,9 @@ mod tests { assert_eq!( ctx.ext.call( Weight::zero(), - BalanceOf::::zero(), + U256::zero(), &CHARLIE_ADDR, - 0, + U256::zero(), vec![], true, false @@ -3957,15 +3945,7 @@ mod tests { let code_charlie = MockLoader::insert(Call, |ctx, _| { assert!(ctx .ext - .call( - Weight::zero(), - BalanceOf::::zero(), - &BOB_ADDR, - 0, - vec![99], - true, - false - ) + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false) .is_ok()); exec_trapped() }); diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index 393acc8c9852..4c6e5cd26a11 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -27,7 +27,10 @@ mod benchmarking_dummy; mod exec; mod gas; mod primitives; +use crate::exec::MomentOf; +use frame_support::traits::IsType; pub use primitives::*; +use sp_core::U256; mod limits; mod storage; @@ -36,7 +39,6 @@ mod wasm; pub mod chain_extension; pub mod debug; -pub mod migration; pub mod test_utils; pub mod weights; @@ -54,7 +56,7 @@ use environmental::*; use frame_support::{ dispatch::{ DispatchErrorWithPostInfo, DispatchResultWithPostInfo, GetDispatchInfo, Pays, - PostDispatchInfo, RawOrigin, WithPostDispatchInfo, + PostDispatchInfo, RawOrigin, }, ensure, traits::{ @@ -79,7 +81,6 @@ use sp_runtime::{ pub use crate::{ address::{AddressMapper, DefaultAddressMapper}, debug::Tracing, - migration::{MigrateSequence, Migration, NoopMigration}, pallet::*, }; pub use weights::WeightInfo; @@ -129,6 +130,7 @@ pub mod pallet { use crate::debug::Debugger; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use sp_core::U256; use sp_runtime::Perbill; /// The in-code storage version. @@ -206,7 +208,7 @@ pub mod pallet { /// /// # Note /// - /// It is safe to chage this value on a live chain as all refunds are pro rata. + /// It is safe to change this value on a live chain as all refunds are pro rata. #[pallet::constant] #[pallet::no_default_bounds] type DepositPerByte: Get>; @@ -215,7 +217,7 @@ pub mod pallet { /// /// # Note /// - /// It is safe to chage this value on a live chain as all refunds are pro rata. + /// It is safe to change this value on a live chain as all refunds are pro rata. #[pallet::constant] #[pallet::no_default_bounds] type DepositPerItem: Get>; @@ -271,25 +273,6 @@ pub mod pallet { #[pallet::no_default_bounds] type InstantiateOrigin: EnsureOrigin; - /// The sequence of migration steps that will be applied during a migration. 
- /// - /// # Examples - /// ```ignore - /// use pallet_revive::migration::{v10, v11}; - /// # struct Runtime {}; - /// # struct Currency {}; - /// type Migrations = (v10::Migration, v11::Migration); - /// ``` - /// - /// If you have a single migration step, you can use a tuple with a single element: - /// ```ignore - /// use pallet_revive::migration::v10; - /// # struct Runtime {}; - /// # struct Currency {}; - /// type Migrations = (v10::Migration,); - /// ``` - type Migrations: MigrateSequence; - /// For most production chains, it's recommended to use the `()` implementation of this /// trait. This implementation offers additional logging when the log target /// "runtime::revive" is set to trace. @@ -305,13 +288,13 @@ pub mod pallet { BlockNumberFor, >; - /// The amount of memory in bytes that parachain nodes alot to the runtime. + /// The amount of memory in bytes that parachain nodes a lot to the runtime. /// /// This is used in [`Pallet::integrity_test`] to make sure that the runtime has enough /// memory to support this pallet if set to the correct value. type RuntimeMemory: Get; - /// The amount of memory in bytes that relay chain validators alot to the PoV. + /// The amount of memory in bytes that relay chain validators a lot to the PoV. /// /// This is used in [`Pallet::integrity_test`] to make sure that the runtime has enough /// memory to support this pallet if set to the correct value. @@ -382,7 +365,6 @@ pub mod pallet { type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; type MaxCodeLen = ConstU32<{ 123 * 1024 }>; - type Migrations = (); type Time = Self; type UnsafeUnstableInterface = ConstBool; type UploadOrigin = EnsureSigned; @@ -549,10 +531,6 @@ pub mod pallet { /// A more detailed error can be found on the node console if debug messages are enabled /// by supplying `-lruntime::revive=debug`. CodeRejected, - /// A pending migration needs to complete before the extrinsic can be called. - MigrationInProgress, - /// Migrate dispatch call was attempted but no migration was performed. - NoMigrationPerformed, /// The contract has reached its maximum number of delegate dependencies. MaxDelegateDependenciesReached, /// The dependency was not found in the contract's delegate dependencies. @@ -569,6 +547,8 @@ pub mod pallet { InvalidStorageFlags, /// PolkaVM failed during code execution. Probably due to a malformed program. ExecutionFailed, + /// Failed to convert a U256 to a Balance. + BalanceConversionFailed, } /// A reason for the pallet contracts placing a hold on funds. @@ -605,12 +585,6 @@ pub mod pallet { pub(crate) type DeletionQueueCounter = StorageValue<_, DeletionQueueManager, ValueQuery>; - /// A migration can span across multiple blocks. This storage defines a cursor to track the - /// progress of the migration, enabling us to resume from the last completed position. - #[pallet::storage] - pub(crate) type MigrationInProgress = - StorageValue<_, migration::Cursor, OptionQuery>; - #[pallet::extra_constants] impl Pallet { #[pallet::constant_name(ApiVersion)] @@ -620,31 +594,17 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet { + impl Hooks> for Pallet + where + T::Hash: IsType, + { fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { - use migration::MigrateResult::*; let mut meter = WeightMeter::with_limit(limit); - - loop { - match Migration::::migrate(&mut meter) { - // There is not enough weight to perform a migration. - // We can't do anything more, so we return the used weight. 
- NoMigrationPerformed | InProgress { steps_done: 0 } => return meter.consumed(), - // Migration is still in progress, we can start the next step. - InProgress { .. } => continue, - // Either no migration is in progress, or we are done with all migrations, we - // can do some more other work with the remaining weight. - Completed | NoMigrationInProgress => break, - } - } - ContractInfo::::process_deletion_queue_batch(&mut meter); meter.consumed() } fn integrity_test() { - Migration::::integrity_test(); - // Total runtime memory limit let max_runtime_mem: u32 = T::RuntimeMemory::get(); // Memory limits for a single contract: @@ -771,7 +731,10 @@ pub mod pallet { #[pallet::call] impl Pallet where + T::Hash: IsType, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + MomentOf: Into, { /// Makes a call to an account, optionally transferring some balance. /// @@ -957,7 +920,6 @@ pub mod pallet { origin: OriginFor, code_hash: sp_core::H256, ) -> DispatchResultWithPostInfo { - Migration::::ensure_migrated()?; let origin = ensure_signed(origin)?; >::remove(&origin, code_hash)?; // we waive the fee because removing unused code is beneficial @@ -981,7 +943,6 @@ pub mod pallet { dest: H160, code_hash: sp_core::H256, ) -> DispatchResult { - Migration::::ensure_migrated()?; ensure_root(origin)?; >::try_mutate(&dest, |contract| { let contract = if let Some(contract) = contract { @@ -1000,40 +961,6 @@ pub mod pallet { Ok(()) }) } - - /// When a migration is in progress, this dispatchable can be used to run migration steps. - /// Calls that contribute to advancing the migration have their fees waived, as it's helpful - /// for the chain. Note that while the migration is in progress, the pallet will also - /// leverage the `on_idle` hooks to run migration steps. - #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::migrate().saturating_add(*weight_limit))] - pub fn migrate(origin: OriginFor, weight_limit: Weight) -> DispatchResultWithPostInfo { - use migration::MigrateResult::*; - ensure_signed(origin)?; - - let weight_limit = weight_limit.saturating_add(T::WeightInfo::migrate()); - let mut meter = WeightMeter::with_limit(weight_limit); - let result = Migration::::migrate(&mut meter); - - match result { - Completed => Ok(PostDispatchInfo { - actual_weight: Some(meter.consumed()), - pays_fee: Pays::No, - }), - InProgress { steps_done, .. } if steps_done > 0 => Ok(PostDispatchInfo { - actual_weight: Some(meter.consumed()), - pays_fee: Pays::No, - }), - InProgress { .. } => Ok(PostDispatchInfo { - actual_weight: Some(meter.consumed()), - pays_fee: Pays::Yes, - }), - NoMigrationInProgress | NoMigrationPerformed => { - let err: DispatchError = >::NoMigrationPerformed.into(); - Err(err.with_weight(meter.consumed())) - }, - } - } } } @@ -1053,7 +980,12 @@ fn dispatch_result( .map_err(|e| DispatchErrorWithPostInfo { post_info, error: e }) } -impl Pallet { +impl Pallet +where + BalanceOf: Into + TryFrom, + MomentOf: Into, + T::Hash: IsType, +{ /// A generalized version of [`Self::call`]. 
/// /// Identical to [`Self::call`] but tailored towards being called by other code within the @@ -1078,7 +1010,6 @@ impl Pallet { None }; let try_call = || { - Migration::::ensure_migrated()?; let origin = Origin::from_runtime_origin(origin)?; let mut storage_meter = StorageMeter::new(&origin, storage_deposit_limit, value)?; let result = ExecStack::>::run_call( @@ -1131,7 +1062,6 @@ impl Pallet { let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None }; let try_instantiate = || { - Migration::::ensure_migrated()?; let instantiate_account = T::InstantiateOrigin::ensure_origin(origin.clone())?; let (executable, upload_deposit) = match code { Code::Upload(code) => { @@ -1192,7 +1122,6 @@ impl Pallet { code: Vec, storage_deposit_limit: BalanceOf, ) -> CodeUploadResult> { - Migration::::ensure_migrated()?; let origin = T::UploadOrigin::ensure_origin(origin)?; let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, None)?; Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) @@ -1200,9 +1129,6 @@ impl Pallet { /// Query storage of a specified contract under a specified key. pub fn get_storage(address: H160, key: [u8; 32]) -> GetStorageResult { - if Migration::::in_progress() { - return Err(ContractAccessError::MigrationInProgress) - } let contract_info = ContractInfoOf::::get(&address).ok_or(ContractAccessError::DoesntExist)?; @@ -1226,24 +1152,6 @@ impl Pallet { Ok((module, deposit)) } - /// Deposit a pallet contracts event. - fn deposit_event(event: Event) { - >::deposit_event(::RuntimeEvent::from(event)) - } - - /// Deposit a pallet contracts indexed event. - fn deposit_indexed_event(topics: Vec, event: Event) { - >::deposit_event_indexed( - &topics, - ::RuntimeEvent::from(event).into(), - ) - } - - /// Return the existential deposit of [`Config::Currency`]. - fn min_balance() -> BalanceOf { - >>::minimum_balance() - } - /// Run the supplied function `f` if no other instance of this pallet is on the stack. fn run_guarded Result>(f: F) -> Result { executing_contract::using_once(&mut false, || { @@ -1264,6 +1172,30 @@ impl Pallet { } } +impl Pallet +where + T: Config, + T::Hash: IsType, +{ + /// Return the existential deposit of [`Config::Currency`]. + fn min_balance() -> BalanceOf { + >>::minimum_balance() + } + + /// Deposit a pallet contracts event. + fn deposit_event(event: Event) { + >::deposit_event(::RuntimeEvent::from(event)) + } + + /// Deposit a pallet contracts indexed event. + fn deposit_indexed_event(topics: Vec, event: Event) { + >::deposit_event_indexed( + &topics.into_iter().map(Into::into).collect::>(), + ::RuntimeEvent::from(event).into(), + ) + } +} + // Set up a global reference to the boolean flag used for the re-entrancy guard. environmental!(executing_contract: bool); diff --git a/substrate/frame/revive/src/migration.rs b/substrate/frame/revive/src/migration.rs deleted file mode 100644 index b67467b322f5..000000000000 --- a/substrate/frame/revive/src/migration.rs +++ /dev/null @@ -1,650 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Multi-block Migration framework for pallet-revive. -//! -//! This module allows us to define a migration as a sequence of [`MigrationStep`]s that can be -//! executed across multiple blocks. -//! -//! # Usage -//! -//! A migration step is defined under `src/migration/vX.rs`, where `X` is the version number. -//! For example, `vX.rs` defines a migration from version `X - 1` to version `X`. -//! -//! ## Example: -//! -//! To configure a migration to `v11` for a runtime using `v10` of pallet-revive on the chain, -//! you would set the `Migrations` type as follows: -//! -//! ```ignore -//! use pallet_revive::migration::{v10, v11}; -//! # pub enum Runtime {}; -//! # struct Currency; -//! type Migrations = (v10::Migration, v11::Migration); -//! ``` -//! -//! ## Notes: -//! -//! - Migrations should always be tested with `try-runtime` before being deployed. -//! - By testing with `try-runtime` against a live network, you ensure that all migration steps work -//! and that you have included the required steps. -//! -//! ## Low Level / Implementation Details -//! -//! When a migration starts and [`OnRuntimeUpgrade::on_runtime_upgrade`] is called, instead of -//! performing the actual migration, we set a custom storage item [`MigrationInProgress`]. -//! This storage item defines a [`Cursor`] for the current migration. -//! -//! If the [`MigrationInProgress`] storage item exists, it means a migration is in progress, and its -//! value holds a cursor for the current migration step. These migration steps are executed during -//! [`Hooks::on_idle`] or when the [`Pallet::migrate`] dispatchable is -//! called. -//! -//! While the migration is in progress, all dispatchables except `migrate`, are blocked, and returns -//! a `MigrationInProgress` error. - -include!(concat!(env!("OUT_DIR"), "/migration_codegen.rs")); - -use crate::{weights::WeightInfo, Config, Error, MigrationInProgress, Pallet, Weight, LOG_TARGET}; -use codec::{Codec, Decode}; -use core::marker::PhantomData; -use frame_support::{ - pallet_prelude::*, - traits::{ConstU32, OnRuntimeUpgrade}, - weights::WeightMeter, -}; -use sp_runtime::Saturating; - -#[cfg(feature = "try-runtime")] -use alloc::vec::Vec; -#[cfg(feature = "try-runtime")] -use sp_runtime::TryRuntimeError; - -const PROOF_ENCODE: &str = "Tuple::max_encoded_len() < Cursor::max_encoded_len()` is verified in `Self::integrity_test()`; qed"; -const PROOF_DECODE: &str = - "We encode to the same type in this trait only. No other code touches this item; qed"; - -fn invalid_version(version: StorageVersion) -> ! { - panic!("Required migration {version:?} not supported by this runtime. This is a bug."); -} - -/// The cursor used to encode the position (usually the last iterated key) of the current migration -/// step. -pub type Cursor = BoundedVec>; - -/// IsFinished describes whether a migration is finished or not. -pub enum IsFinished { - Yes, - No, -} - -/// A trait that allows to migrate storage from one version to another. -/// -/// The migration is done in steps. The migration is finished when -/// `step()` returns `IsFinished::Yes`. 
-pub trait MigrationStep: Codec + MaxEncodedLen + Default { - /// Returns the version of the migration. - const VERSION: u16; - - /// Returns the maximum weight that can be consumed in a single step. - fn max_step_weight() -> Weight; - - /// Process one step of the migration. - /// - /// Returns whether the migration is finished. - fn step(&mut self, meter: &mut WeightMeter) -> IsFinished; - - /// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater - /// than `max_block_weight`. - fn integrity_test(max_block_weight: Weight) { - if Self::max_step_weight().any_gt(max_block_weight) { - panic!( - "Invalid max_step_weight for Migration {}. Value should be lower than {}", - Self::VERSION, - max_block_weight - ); - } - - let len = ::max_encoded_len(); - let max = Cursor::bound(); - if len > max { - panic!( - "Migration {} has size {} which is bigger than the maximum of {}", - Self::VERSION, - len, - max, - ); - } - } - - /// Execute some pre-checks prior to running the first step of this migration. - #[cfg(feature = "try-runtime")] - fn pre_upgrade_step() -> Result, TryRuntimeError> { - Ok(Vec::new()) - } - - /// Execute some post-checks after running the last step of this migration. - #[cfg(feature = "try-runtime")] - fn post_upgrade_step(_state: Vec) -> Result<(), TryRuntimeError> { - Ok(()) - } -} - -/// A noop migration that can be used when there is no migration to be done for a given version. -#[doc(hidden)] -#[derive(frame_support::DefaultNoBound, Encode, Decode, MaxEncodedLen)] -pub struct NoopMigration; - -impl MigrationStep for NoopMigration { - const VERSION: u16 = N; - fn max_step_weight() -> Weight { - Weight::zero() - } - fn step(&mut self, _meter: &mut WeightMeter) -> IsFinished { - log::debug!(target: LOG_TARGET, "Noop migration for version {}", N); - IsFinished::Yes - } -} - -mod private { - use crate::migration::MigrationStep; - pub trait Sealed {} - #[impl_trait_for_tuples::impl_for_tuples(10)] - #[tuple_types_custom_trait_bound(MigrationStep)] - impl Sealed for Tuple {} -} - -/// Defines a sequence of migrations. -/// -/// The sequence must be defined by a tuple of migrations, each of which must implement the -/// `MigrationStep` trait. Migrations must be ordered by their versions with no gaps. -pub trait MigrateSequence: private::Sealed { - /// Returns the range of versions that this migrations sequence can handle. - /// Migrations must be ordered by their versions with no gaps. - /// - /// The following code will fail to compile: - /// - /// ```compile_fail - /// # use pallet_revive::{NoopMigration, MigrateSequence}; - /// let _ = <(NoopMigration<1>, NoopMigration<3>)>::VERSION_RANGE; - /// ``` - /// The following code will compile: - /// ``` - /// # use pallet_revive::{NoopMigration, MigrateSequence}; - /// let _ = <(NoopMigration<1>, NoopMigration<2>)>::VERSION_RANGE; - /// ``` - const VERSION_RANGE: (u16, u16); - - /// Returns the default cursor for the given version. - fn new(version: StorageVersion) -> Cursor; - - #[cfg(feature = "try-runtime")] - fn pre_upgrade_step(_version: StorageVersion) -> Result, TryRuntimeError> { - Ok(Vec::new()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade_step(_version: StorageVersion, _state: Vec) -> Result<(), TryRuntimeError> { - Ok(()) - } - - /// Execute the migration step until the available weight is consumed. 
- fn steps(version: StorageVersion, cursor: &[u8], meter: &mut WeightMeter) -> StepResult; - - /// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater - /// than `max_block_weight`. - fn integrity_test(max_block_weight: Weight); - - /// Returns whether migrating from `in_storage` to `target` is supported. - /// - /// A migration is supported if `VERSION_RANGE` is (in_storage + 1, target). - fn is_upgrade_supported(in_storage: StorageVersion, target: StorageVersion) -> bool { - let (low, high) = Self::VERSION_RANGE; - target == high && in_storage + 1 == low - } -} - -/// Performs all necessary migrations based on `StorageVersion`. -/// -/// If `TEST_ALL_STEPS == true` and `try-runtime` is enabled, this will run all the migrations -/// inside `on_runtime_upgrade`. This should be set to false in tests that want to ensure the step -/// by step migration works. -pub struct Migration(PhantomData); - -#[cfg(feature = "try-runtime")] -impl Migration { - fn run_all_steps() -> Result<(), TryRuntimeError> { - let mut meter = &mut WeightMeter::new(); - let name = >::name(); - loop { - let in_progress_version = >::on_chain_storage_version() + 1; - let state = T::Migrations::pre_upgrade_step(in_progress_version)?; - let before = meter.consumed(); - let status = Self::migrate(&mut meter); - log::info!( - target: LOG_TARGET, - "{name}: Migration step {:?} weight = {}", - in_progress_version, - meter.consumed() - before - ); - T::Migrations::post_upgrade_step(in_progress_version, state)?; - if matches!(status, MigrateResult::Completed) { - break - } - } - - let name = >::name(); - log::info!(target: LOG_TARGET, "{name}: Migration steps weight = {}", meter.consumed()); - Ok(()) - } -} - -impl OnRuntimeUpgrade for Migration { - fn on_runtime_upgrade() -> Weight { - let name = >::name(); - let in_code_version = >::in_code_storage_version(); - let on_chain_version = >::on_chain_storage_version(); - - if on_chain_version == in_code_version { - log::warn!( - target: LOG_TARGET, - "{name}: No Migration performed storage_version = latest_version = {:?}", - &on_chain_version - ); - return T::WeightInfo::on_runtime_upgrade_noop() - } - - // In case a migration is already in progress we create the next migration - // (if any) right when the current one finishes. - if Self::in_progress() { - log::warn!( - target: LOG_TARGET, - "{name}: Migration already in progress {:?}", - &on_chain_version - ); - - return T::WeightInfo::on_runtime_upgrade_in_progress() - } - - log::info!( - target: LOG_TARGET, - "{name}: Upgrading storage from {on_chain_version:?} to {in_code_version:?}.", - ); - - let cursor = T::Migrations::new(on_chain_version + 1); - MigrationInProgress::::set(Some(cursor)); - - #[cfg(feature = "try-runtime")] - if TEST_ALL_STEPS { - Self::run_all_steps().unwrap(); - } - - T::WeightInfo::on_runtime_upgrade() - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, TryRuntimeError> { - // We can't really do much here as our migrations do not happen during the runtime upgrade. - // Instead, we call the migrations `pre_upgrade` and `post_upgrade` hooks when we iterate - // over our migrations. 
- let on_chain_version = >::on_chain_storage_version(); - let in_code_version = >::in_code_storage_version(); - - if on_chain_version == in_code_version { - return Ok(Default::default()) - } - - log::debug!( - target: LOG_TARGET, - "Requested migration of {} from {:?}(on-chain storage version) to {:?}(in-code storage version)", - >::name(), on_chain_version, in_code_version - ); - - ensure!( - T::Migrations::is_upgrade_supported(on_chain_version, in_code_version), - "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, in-code storage version)" - ); - - Ok(Default::default()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { - if !TEST_ALL_STEPS { - return Ok(()) - } - - log::info!(target: LOG_TARGET, "=== POST UPGRADE CHECKS ==="); - - // Ensure that the hashing algorithm is correct for each storage map. - if let Some(hash) = crate::CodeInfoOf::::iter_keys().next() { - crate::CodeInfoOf::::get(hash).expect("CodeInfo exists for hash; qed"); - } - if let Some(hash) = crate::PristineCode::::iter_keys().next() { - crate::PristineCode::::get(hash).expect("PristineCode exists for hash; qed"); - } - if let Some(account_id) = crate::ContractInfoOf::::iter_keys().next() { - crate::ContractInfoOf::::get(account_id) - .expect("ContractInfo exists for account_id; qed"); - } - if let Some(nonce) = crate::DeletionQueue::::iter_keys().next() { - crate::DeletionQueue::::get(nonce).expect("DeletionQueue exists for nonce; qed"); - } - - Ok(()) - } -} - -/// The result of running the migration. -#[derive(Debug, PartialEq)] -pub enum MigrateResult { - /// No migration was performed - NoMigrationPerformed, - /// No migration currently in progress - NoMigrationInProgress, - /// A migration is in progress - InProgress { steps_done: u32 }, - /// All migrations are completed - Completed, -} - -/// The result of running a migration step. -#[derive(Debug, PartialEq)] -pub enum StepResult { - InProgress { cursor: Cursor, steps_done: u32 }, - Completed { steps_done: u32 }, -} - -impl Migration { - /// Verify that each migration's step of the [`Config::Migrations`] sequence fits into - /// `Cursor`. - pub(crate) fn integrity_test() { - let max_weight = ::BlockWeights::get().max_block; - T::Migrations::integrity_test(max_weight) - } - - /// Execute the multi-step migration. 
- /// Returns whether or not a migration is in progress - pub(crate) fn migrate(mut meter: &mut WeightMeter) -> MigrateResult { - let name = >::name(); - - if meter.try_consume(T::WeightInfo::migrate()).is_err() { - return MigrateResult::NoMigrationPerformed - } - - MigrationInProgress::::mutate_exists(|progress| { - let Some(cursor_before) = progress.as_mut() else { - meter.consume(T::WeightInfo::migration_noop()); - return MigrateResult::NoMigrationInProgress - }; - - // if a migration is running it is always upgrading to the next version - let storage_version = >::on_chain_storage_version(); - let in_progress_version = storage_version + 1; - - log::info!( - target: LOG_TARGET, - "{name}: Migrating from {:?} to {:?},", - storage_version, - in_progress_version, - ); - - let result = - match T::Migrations::steps(in_progress_version, cursor_before.as_ref(), &mut meter) - { - StepResult::InProgress { cursor, steps_done } => { - *progress = Some(cursor); - MigrateResult::InProgress { steps_done } - }, - StepResult::Completed { steps_done } => { - in_progress_version.put::>(); - if >::in_code_storage_version() != in_progress_version { - log::info!( - target: LOG_TARGET, - "{name}: Next migration is {:?},", - in_progress_version + 1 - ); - *progress = Some(T::Migrations::new(in_progress_version + 1)); - MigrateResult::InProgress { steps_done } - } else { - log::info!( - target: LOG_TARGET, - "{name}: All migrations done. At version {:?},", - in_progress_version - ); - *progress = None; - MigrateResult::Completed - } - }, - }; - - result - }) - } - - pub(crate) fn ensure_migrated() -> DispatchResult { - if Self::in_progress() { - Err(Error::::MigrationInProgress.into()) - } else { - Ok(()) - } - } - - pub(crate) fn in_progress() -> bool { - MigrationInProgress::::exists() - } -} - -#[impl_trait_for_tuples::impl_for_tuples(10)] -#[tuple_types_custom_trait_bound(MigrationStep)] -impl MigrateSequence for Tuple { - const VERSION_RANGE: (u16, u16) = { - let mut versions: (u16, u16) = (0, 0); - for_tuples!( - #( - match versions { - (0, 0) => { - versions = (Tuple::VERSION, Tuple::VERSION); - }, - (min_version, last_version) if Tuple::VERSION == last_version + 1 => { - versions = (min_version, Tuple::VERSION); - }, - _ => panic!("Migrations must be ordered by their versions with no gaps.") - } - )* - ); - versions - }; - - fn new(version: StorageVersion) -> Cursor { - for_tuples!( - #( - if version == Tuple::VERSION { - return Tuple::default().encode().try_into().expect(PROOF_ENCODE) - } - )* - ); - invalid_version(version) - } - - #[cfg(feature = "try-runtime")] - /// Execute the pre-checks of the step associated with this version. - fn pre_upgrade_step(version: StorageVersion) -> Result, TryRuntimeError> { - for_tuples!( - #( - if version == Tuple::VERSION { - return Tuple::pre_upgrade_step() - } - )* - ); - invalid_version(version) - } - - #[cfg(feature = "try-runtime")] - /// Execute the post-checks of the step associated with this version. 
- fn post_upgrade_step(version: StorageVersion, state: Vec) -> Result<(), TryRuntimeError> { - for_tuples!( - #( - if version == Tuple::VERSION { - return Tuple::post_upgrade_step(state) - } - )* - ); - invalid_version(version) - } - - fn steps(version: StorageVersion, mut cursor: &[u8], meter: &mut WeightMeter) -> StepResult { - for_tuples!( - #( - if version == Tuple::VERSION { - let mut migration = ::decode(&mut cursor) - .expect(PROOF_DECODE); - let max_weight = Tuple::max_step_weight(); - let mut steps_done = 0; - while meter.can_consume(max_weight) { - steps_done.saturating_accrue(1); - if matches!(migration.step(meter), IsFinished::Yes) { - return StepResult::Completed{ steps_done } - } - } - return StepResult::InProgress{cursor: migration.encode().try_into().expect(PROOF_ENCODE), steps_done } - } - )* - ); - invalid_version(version) - } - - fn integrity_test(max_block_weight: Weight) { - for_tuples!( - #( - Tuple::integrity_test(max_block_weight); - )* - ); - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{ - migration::codegen::LATEST_MIGRATION_VERSION, - tests::{ExtBuilder, Test}, - }; - - #[derive(Default, Encode, Decode, MaxEncodedLen)] - struct MockMigration { - // MockMigration needs `N` steps to finish - count: u16, - } - - impl MigrationStep for MockMigration { - const VERSION: u16 = N; - fn max_step_weight() -> Weight { - Weight::from_all(1) - } - fn step(&mut self, meter: &mut WeightMeter) -> IsFinished { - assert!(self.count != N); - self.count += 1; - meter.consume(Weight::from_all(1)); - if self.count == N { - IsFinished::Yes - } else { - IsFinished::No - } - } - } - - #[test] - fn test_storage_version_matches_last_migration_file() { - assert_eq!(StorageVersion::new(LATEST_MIGRATION_VERSION), crate::pallet::STORAGE_VERSION); - } - - #[test] - fn version_range_works() { - let range = <(MockMigration<1>, MockMigration<2>)>::VERSION_RANGE; - assert_eq!(range, (1, 2)); - } - - #[test] - fn is_upgrade_supported_works() { - type Migrations = (MockMigration<9>, MockMigration<10>, MockMigration<11>); - assert!(Migrations::is_upgrade_supported(StorageVersion::new(8), StorageVersion::new(11))); - assert!(!Migrations::is_upgrade_supported(StorageVersion::new(9), StorageVersion::new(11))); - assert!(!Migrations::is_upgrade_supported(StorageVersion::new(8), StorageVersion::new(12))); - } - - #[test] - fn steps_works() { - type Migrations = (MockMigration<2>, MockMigration<3>); - let version = StorageVersion::new(2); - let mut cursor = Migrations::new(version); - - let mut meter = WeightMeter::with_limit(Weight::from_all(1)); - let result = Migrations::steps(version, &cursor, &mut meter); - cursor = alloc::vec![1u8, 0].try_into().unwrap(); - assert_eq!(result, StepResult::InProgress { cursor: cursor.clone(), steps_done: 1 }); - assert_eq!(meter.consumed(), Weight::from_all(1)); - - let mut meter = WeightMeter::with_limit(Weight::from_all(1)); - assert_eq!( - Migrations::steps(version, &cursor, &mut meter), - StepResult::Completed { steps_done: 1 } - ); - } - - #[test] - fn no_migration_in_progress_works() { - type TestMigration = Migration; - - ExtBuilder::default().build().execute_with(|| { - assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION); - assert_eq!( - TestMigration::migrate(&mut WeightMeter::new()), - MigrateResult::NoMigrationInProgress - ) - }); - } - - #[test] - fn migration_works() { - type TestMigration = Migration; - - ExtBuilder::default() - .set_storage_version(LATEST_MIGRATION_VERSION - 2) - .build() - .execute_with(|| { - 
assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION - 2); - TestMigration::on_runtime_upgrade(); - for (version, status) in [ - (LATEST_MIGRATION_VERSION - 1, MigrateResult::InProgress { steps_done: 1 }), - (LATEST_MIGRATION_VERSION, MigrateResult::Completed), - ] { - assert_eq!(TestMigration::migrate(&mut WeightMeter::new()), status); - assert_eq!( - >::on_chain_storage_version(), - StorageVersion::new(version) - ); - } - - assert_eq!( - TestMigration::migrate(&mut WeightMeter::new()), - MigrateResult::NoMigrationInProgress - ); - assert_eq!(StorageVersion::get::>(), LATEST_MIGRATION_VERSION); - }); - } -} diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index 98e8879457bf..1b48527d23d7 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -103,8 +103,6 @@ pub enum ContractAccessError { DoesntExist, /// Storage key cannot be decoded from the provided input data. KeyDecodingFailed, - /// Storage is migrating. Try again later. - MigrationInProgress, } /// Output of a contract call or instantiation which ran to completion. diff --git a/substrate/frame/revive/src/storage.rs b/substrate/frame/revive/src/storage.rs index ef7ce2db32cf..9939de1dfd19 100644 --- a/substrate/frame/revive/src/storage.rs +++ b/substrate/frame/revive/src/storage.rs @@ -33,11 +33,12 @@ use codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; use frame_support::{ storage::child::{self, ChildInfo}, + traits::IsType, weights::{Weight, WeightMeter}, CloneNoBound, DefaultNoBound, }; use scale_info::TypeInfo; -use sp_core::{ConstU32, Get, H160}; +use sp_core::{ConstU32, Get, H160, H256}; use sp_io::KillStorageResult; use sp_runtime::{ traits::{Hash, Saturating, Zero}, @@ -77,7 +78,10 @@ pub struct ContractInfo { delegate_dependencies: DelegateDependencyMap, } -impl ContractInfo { +impl ContractInfo +where + T::Hash: IsType, +{ /// Constructs a new contract info **without** writing it to storage. /// /// This returns an `Err` if an contract with the supplied `account` already exists diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs index f6ad4c5fc346..9d70ddf85870 100644 --- a/substrate/frame/revive/src/storage/meter.rs +++ b/substrate/frame/revive/src/storage/meter.rs @@ -21,17 +21,17 @@ use crate::{ address::AddressMapper, storage::ContractInfo, AccountIdOf, BalanceOf, CodeInfo, Config, Error, Event, HoldReason, Inspect, Origin, Pallet, StorageDeposit as Deposit, System, LOG_TARGET, }; - use alloc::vec::Vec; use core::{fmt::Debug, marker::PhantomData}; use frame_support::{ traits::{ fungible::{Mutate, MutateHold}, tokens::{Fortitude, Fortitude::Polite, Precision, Preservation, Restriction}, - Get, + Get, IsType, }, DefaultNoBound, RuntimeDebugNoBound, }; +use sp_core::H256; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, FixedPointNumber, FixedU128, @@ -400,6 +400,7 @@ where impl RawMeter where T: Config, + T::Hash: IsType, E: Ext, { /// Charges `diff` from the meter. @@ -503,7 +504,10 @@ where } } -impl Ext for ReservingExt { +impl Ext for ReservingExt +where + T::Hash: IsType, +{ fn check_limit( origin: &T::AccountId, limit: BalanceOf, diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index b17067769c05..b17d7628fb80 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -54,6 +54,9 @@ macro_rules! 
builder { impl $name where as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + BalanceOf: Into + TryFrom, + crate::MomentOf: Into, + T::Hash: frame_support::traits::IsType, { $( #[doc = concat!("Set the ", stringify!($field))] diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 447d55f0dd8d..f2944c7932a6 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -33,7 +33,6 @@ use crate::{ }, exec::Key, limits, - migration::codegen::LATEST_MIGRATION_VERSION, primitives::CodeUploadReturnValue, storage::DeletionQueueManager, test_utils::*, @@ -41,8 +40,8 @@ use crate::{ wasm::Memory, weights::WeightInfo, BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, - DefaultAddressMapper, DeletionQueueCounter, Error, HoldReason, MigrationInProgress, Origin, - Pallet, PristineCode, H160, + DefaultAddressMapper, DeletionQueueCounter, Error, HoldReason, Origin, Pallet, PristineCode, + H160, }; use crate::test_utils::builder::Contract; @@ -490,7 +489,6 @@ impl Config for Test { type UnsafeUnstableInterface = UnstableInterface; type UploadOrigin = EnsureAccount; type InstantiateOrigin = EnsureAccount; - type Migrations = crate::migration::codegen::BenchMigrations; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = TestDebug; } @@ -523,10 +521,6 @@ impl ExtBuilder { pub fn set_associated_consts(&self) { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); } - pub fn set_storage_version(mut self, version: u16) -> Self { - self.storage_version = Some(StorageVersion::new(version)); - self - } pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); self.set_associated_consts(); @@ -593,6 +587,7 @@ impl Default for Origin { mod run_tests { use super::*; use pretty_assertions::{assert_eq, assert_ne}; + use sp_core::U256; // Perform a call to a plain account. // The actual transfer fails because we can only call contracts. @@ -616,66 +611,6 @@ mod run_tests { }); } - #[test] - fn migration_on_idle_hooks_works() { - // Defines expectations of how many migration steps can be done given the weight limit. 
- let tests = [ - (Weight::zero(), LATEST_MIGRATION_VERSION - 2), - (::WeightInfo::migrate() + 1.into(), LATEST_MIGRATION_VERSION - 1), - (Weight::MAX, LATEST_MIGRATION_VERSION), - ]; - - for (weight, expected_version) in tests { - ExtBuilder::default() - .set_storage_version(LATEST_MIGRATION_VERSION - 2) - .build() - .execute_with(|| { - MigrationInProgress::::set(Some(Default::default())); - Contracts::on_idle(System::block_number(), weight); - assert_eq!(StorageVersion::get::>(), expected_version); - }); - } - } - - #[test] - fn migration_in_progress_works() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); - - ExtBuilder::default().existential_deposit(1).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - MigrationInProgress::::set(Some(Default::default())); - - assert_err!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - vec![], - deposit_limit::(), - ), - Error::::MigrationInProgress, - ); - assert_err!( - Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), - Error::::MigrationInProgress, - ); - assert_err!( - Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB_ADDR, code_hash), - Error::::MigrationInProgress, - ); - assert_err_ignore_postinfo!( - builder::call(BOB_ADDR).build(), - Error::::MigrationInProgress - ); - assert_err_ignore_postinfo!( - builder::instantiate_with_code(wasm).value(100_000).build(), - Error::::MigrationInProgress, - ); - assert_err_ignore_postinfo!( - builder::instantiate(code_hash).value(100_000).build(), - Error::::MigrationInProgress, - ); - }); - } - #[test] fn instantiate_and_call_and_deposit_event() { let (wasm, code_hash) = compile_module("event_and_return_on_deploy").unwrap(); @@ -745,7 +680,7 @@ mod run_tests { contract: addr, data: vec![1, 2, 3, 4] }), - topics: vec![], + topics: vec![H256::repeat_byte(42)], }, EventRecord { phase: Phase::Initialization, @@ -3402,7 +3337,7 @@ mod run_tests { assert_err_ignore_postinfo!( builder::call(addr_caller) .storage_deposit_limit(13) - .data((100u32, &addr_callee, 0u64).encode()) + .data((100u32, &addr_callee, U256::from(0u64)).encode()) .build(), >::StorageDepositLimitExhausted, ); @@ -3416,7 +3351,7 @@ mod run_tests { assert_err_ignore_postinfo!( builder::call(addr_caller) .storage_deposit_limit(14) - .data((101u32, &addr_callee, 0u64).encode()) + .data((101u32, &addr_callee, U256::from(0u64)).encode()) .build(), >::StorageDepositLimitExhausted, ); @@ -3429,7 +3364,7 @@ mod run_tests { assert_err_ignore_postinfo!( builder::call(addr_caller) .storage_deposit_limit(16) - .data((102u32, &addr_callee, 1u64).encode()) + .data((102u32, &addr_callee, U256::from(1u64)).encode()) .build(), >::StorageDepositLimitExhausted, ); @@ -3440,7 +3375,7 @@ mod run_tests { assert_err_ignore_postinfo!( builder::call(addr_caller) .storage_deposit_limit(0) - .data((87u32, &addr_callee, 0u64).encode()) + .data((87u32, &addr_callee, U256::from(0u64)).encode()) .build(), >::StorageDepositLimitExhausted, ); @@ -3450,7 +3385,9 @@ mod run_tests { // Require more than the sender's balance. // We don't set a special limit for the nested call. assert_err_ignore_postinfo!( - builder::call(addr_caller).data((512u32, &addr_callee, 1u64).encode()).build(), + builder::call(addr_caller) + .data((512u32, &addr_callee, U256::from(1u64)).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -3459,7 +3396,7 @@ mod run_tests { // enforced as callee frees up storage. This should pass. 
assert_ok!(builder::call(addr_caller) .storage_deposit_limit(1) - .data((87u32, &addr_callee, 1u64).encode()) + .data((87u32, &addr_callee, U256::from(1u64)).encode()) .build()); }); } @@ -3500,7 +3437,7 @@ mod run_tests { builder::call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) .storage_deposit_limit(callee_info_len + 2 + ED + 1) - .data((0u32, &code_hash_callee, 0u64).encode()) + .data((0u32, &code_hash_callee, U256::from(0u64)).encode()) .build(), >::StorageDepositLimitExhausted, ); @@ -3514,7 +3451,7 @@ mod run_tests { builder::call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) .storage_deposit_limit(callee_info_len + 2 + ED + 2) - .data((1u32, &code_hash_callee, 0u64).encode()) + .data((1u32, &code_hash_callee, U256::from(0u64)).encode()) .build(), >::StorageDepositLimitExhausted, ); @@ -3528,7 +3465,10 @@ mod run_tests { builder::call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) .storage_deposit_limit(callee_info_len + 2 + ED + 2) - .data((0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode()) + .data( + (0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1)) + .encode() + ) .build(), >::StorageDepositLimitExhausted, ); @@ -3543,7 +3483,10 @@ mod run_tests { builder::call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) .storage_deposit_limit(callee_info_len + 2 + ED + 3) // enough parent limit - .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode()) + .data( + (1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2)) + .encode() + ) .build(), >::StorageDepositLimitExhausted, ); @@ -3554,7 +3497,7 @@ mod run_tests { let result = builder::bare_call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) .storage_deposit_limit(callee_info_len + 2 + ED + 4) - .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode()) + .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3)).encode()) .build(); let returned = result.result.unwrap(); diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index 9024390fd24f..5813903326bf 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -45,9 +45,9 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::DispatchResult, ensure, - traits::{fungible::MutateHold, tokens::Precision::BestEffort}, + traits::{fungible::MutateHold, tokens::Precision::BestEffort, IsType}, }; -use sp_core::Get; +use sp_core::{Get, H256, U256}; use sp_runtime::DispatchError; /// Validated Wasm module ready for execution. @@ -123,7 +123,11 @@ impl Token for CodeLoadToken { } } -impl WasmBlob { +impl WasmBlob +where + T::Hash: IsType, + BalanceOf: Into + TryFrom, +{ /// We only check for size and nothing else when the code is uploaded. 
pub fn from_code( code: Vec, @@ -251,7 +255,11 @@ pub struct PreparedCall<'a, E: Ext> { api_version: ApiVersion, } -impl<'a, E: Ext> PreparedCall<'a, E> { +impl<'a, E: Ext> PreparedCall<'a, E> +where + BalanceOf: Into, + BalanceOf: TryFrom, +{ pub fn call(mut self) -> ExecResult { let exec_result = loop { let interrupt = self.instance.run(); @@ -315,7 +323,10 @@ impl WasmBlob { } } -impl Executable for WasmBlob { +impl Executable for WasmBlob +where + BalanceOf: Into + TryFrom, +{ fn from_storage( code_hash: sp_core::H256, gas_meter: &mut GasMeter, diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 51c723493847..528b0ababfa0 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -19,12 +19,12 @@ use crate::{ address::AddressMapper, - exec::{ExecError, ExecResult, Ext, Key, TopicOf}, + exec::{ExecError, ExecResult, Ext, Key}, gas::{ChargedAmount, Token}, limits, primitives::ExecReturnValue, weights::WeightInfo, - BalanceOf, Config, Error, LOG_TARGET, SENTINEL, + Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; @@ -35,15 +35,22 @@ use frame_support::{ }; use pallet_revive_proc_macro::define_env; use pallet_revive_uapi::{CallFlags, ReturnErrorCode, ReturnFlags, StorageFlags}; -use sp_core::{H160, H256}; +use sp_core::{H160, H256, U256}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; -use sp_runtime::{traits::Zero, DispatchError, RuntimeDebug}; +use sp_runtime::{DispatchError, RuntimeDebug}; type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; +/// Encode a `U256` into a 32 byte buffer. +fn as_bytes(u: U256) -> [u8; 32] { + let mut bytes = [0u8; 32]; + u.to_little_endian(&mut bytes); + bytes +} + #[derive(Clone, Copy)] pub enum ApiVersion { /// Expose all APIs even unversioned ones. Only used for testing and benchmarking. @@ -84,6 +91,32 @@ pub trait Memory { Ok(buf) } + /// Same as `read` but reads into a fixed size buffer. + fn read_array(&self, ptr: u32) -> Result<[u8; N], DispatchError> { + let mut buf = [0u8; N]; + self.read_into_buf(ptr, &mut buf)?; + Ok(buf) + } + + /// Read a `u32` from the sandbox memory. + fn read_u32(&self, ptr: u32) -> Result { + let buf: [u8; 4] = self.read_array(ptr)?; + Ok(u32::from_le_bytes(buf)) + } + + /// Read a `U256` from the sandbox memory. + fn read_u256(&self, ptr: u32) -> Result { + let buf: [u8; 32] = self.read_array(ptr)?; + Ok(U256::from_little_endian(&buf)) + } + + /// Read a `H256` from the sandbox memory. + fn read_h256(&self, ptr: u32) -> Result { + let mut code_hash = H256::default(); + self.read_into_buf(ptr, code_hash.as_bytes_mut())?; + Ok(code_hash) + } + /// Read designated chunk from the sandbox memory and attempt to decode into the specified type. 
/// /// Returns `Err` if one of the following conditions occurs: @@ -647,7 +680,7 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { } let buf_len = buf.len() as u32; - let len: u32 = memory.read_as(out_len_ptr)?; + let len = memory.read_u32(out_len_ptr)?; if len < buf_len { return Err(Error::::OutputBufferTooSmall.into()) @@ -963,13 +996,13 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { CallType::Call { callee_ptr, value_ptr, deposit_ptr, weight } => { let mut callee = H160::zero(); memory.read_into_buf(callee_ptr, callee.as_bytes_mut())?; - let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { - BalanceOf::<::T>::zero() + let deposit_limit = if deposit_ptr == SENTINEL { + U256::zero() } else { - memory.read_as(deposit_ptr)? + memory.read_u256(deposit_ptr)? }; let read_only = flags.contains(CallFlags::READ_ONLY); - let value: BalanceOf<::T> = memory.read_as(value_ptr)?; + let value = memory.read_u256(value_ptr)?; if value > 0u32.into() { // If the call value is non-zero and state change is not allowed, issue an // error. @@ -992,7 +1025,8 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { if flags.intersects(CallFlags::ALLOW_REENTRY | CallFlags::READ_ONLY) { return Err(Error::::InvalidCallFlags.into()) } - let code_hash = memory.read_as(code_hash_ptr)?; + + let code_hash = memory.read_h256(code_hash_ptr)?; self.ext.delegate_call(code_hash, input_data) }, }; @@ -1036,19 +1070,15 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { salt_ptr: u32, ) -> Result { self.charge_gas(RuntimeCosts::Instantiate { input_data_len })?; - let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { - BalanceOf::<::T>::zero() - } else { - memory.read_as(deposit_ptr)? - }; - let value: BalanceOf<::T> = memory.read_as(value_ptr)?; - let code_hash: H256 = memory.read_as(code_hash_ptr)?; + let deposit_limit: U256 = + if deposit_ptr == SENTINEL { U256::zero() } else { memory.read_u256(deposit_ptr)? }; + let value = memory.read_u256(value_ptr)?; + let code_hash = memory.read_h256(code_hash_ptr)?; let input_data = memory.read(input_data_ptr, input_data_len)?; let salt = if salt_ptr == SENTINEL { None } else { - let mut salt = [0u8; 32]; - memory.read_into_buf(salt_ptr, salt.as_mut_slice())?; + let salt: [u8; 32] = memory.read_array(salt_ptr)?; Some(salt) }; let instantiate_outcome = self.ext.instantiate( @@ -1194,7 +1224,7 @@ pub mod env { self.charge_gas(RuntimeCosts::Transfer)?; let mut callee = H160::zero(); memory.read_into_buf(address_ptr, callee.as_bytes_mut())?; - let value: BalanceOf<::T> = memory.read_as(value_ptr)?; + let value: U256 = memory.read_u256(value_ptr)?; let result = self.ext.transfer(&callee, value); match result { Ok(()) => Ok(ReturnErrorCode::Success), @@ -1374,7 +1404,7 @@ pub mod env { self.write_fixed_sandbox_output( memory, out_ptr, - &value.encode(), + &value.as_bytes(), false, already_charged, )?; @@ -1389,11 +1419,11 @@ pub mod env { #[api_version(0)] fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::OwnCodeHash)?; - let code_hash_encoded = &self.ext.own_code_hash().encode(); + let code_hash = *self.ext.own_code_hash(); Ok(self.write_fixed_sandbox_output( memory, out_ptr, - code_hash_encoded, + code_hash.as_bytes(), false, already_charged, )?) 
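The hunks above replace SCALE-encoded `BalanceOf` values at the host/guest boundary with raw 32-byte little-endian `U256` words: `read_u256` decodes inputs, and the new `as_bytes` helper encodes outputs. Below is a minimal standalone round-trip sketch of that encoding, assuming the `primitive-types` style `U256` re-exported by `sp_core` with the in-place `to_little_endian` signature used in this patch:

```rust
use sp_core::U256;

// Mirror of the `as_bytes` helper added above: fixed 32-byte little-endian.
fn as_bytes(u: U256) -> [u8; 32] {
	let mut bytes = [0u8; 32];
	u.to_little_endian(&mut bytes);
	bytes
}

fn main() {
	let value = U256::from(1_000_000u64);
	// Host side: write the value into a guest-visible 32-byte buffer.
	let buf = as_bytes(value);
	// Read side: `read_u256` reconstructs it with `from_little_endian`.
	assert_eq!(U256::from_little_endian(&buf), value);
}
```

Because the width is fixed, no length prefix or `out_len_ptr` negotiation is needed on either side of the boundary.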
@@ -1439,14 +1469,12 @@ pub mod env { ref_time_limit: u64, proof_size_limit: u64, out_ptr: u32, - out_len_ptr: u32, ) -> Result<(), TrapReason> { let weight = Weight::from_parts(ref_time_limit, proof_size_limit); self.charge_gas(RuntimeCosts::WeightToFee)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, &self.ext.get_weight_price(weight).encode(), false, already_charged, @@ -1477,18 +1505,12 @@ pub mod env { /// Stores the *free* balance of the current account into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. #[api_version(0)] - fn balance( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Balance)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.balance().encode(), + &as_bytes(self.ext.balance()), false, already_charged, )?) @@ -1497,18 +1519,12 @@ pub mod env { /// Stores the value transferred along with this call/instantiate into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::value_transferred`]. #[api_version(0)] - fn value_transferred( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn value_transferred(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::ValueTransferred)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.value_transferred().encode(), + &as_bytes(self.ext.value_transferred()), false, already_charged, )?) @@ -1517,13 +1533,12 @@ pub mod env { /// Load the latest block timestamp into the supplied buffer /// See [`pallet_revive_uapi::HostFn::now`]. #[api_version(0)] - fn now(&mut self, memory: &mut M, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + fn now(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Now)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.now().encode(), + &as_bytes(self.ext.now()), false, already_charged, )?) @@ -1532,18 +1547,12 @@ pub mod env { /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::minimum_balance`]. #[api_version(0)] - fn minimum_balance( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::MinimumBalance)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.minimum_balance().encode(), + &as_bytes(self.ext.minimum_balance()), false, already_charged, )?) 
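The host functions above (`weight_to_fee`, `balance`, `value_transferred`, `now`, `minimum_balance`) all switch from `write_sandbox_output`, which needed an `out_len_ptr` to negotiate a variable-length SCALE buffer, to `write_fixed_sandbox_output` with a fixed 32-byte payload. The sketch below models why the length pointer can be dropped; `write_fixed_output` is a hypothetical stand-in for the runtime's real helper, which additionally handles gas charging:

```rust
// Sketch: with a fixed 32-byte width the host copies straight into guest
// memory; there is no length to report back, so `out_len_ptr` disappears.
fn write_fixed_output(
	memory: &mut [u8],
	out_ptr: usize,
	buf: &[u8; 32],
) -> Result<(), &'static str> {
	let end = out_ptr.checked_add(32).ok_or("pointer overflow")?;
	let out = memory.get_mut(out_ptr..end).ok_or("output pointer out of bounds")?;
	out.copy_from_slice(buf);
	Ok(())
}

fn main() {
	let mut guest_memory = vec![0u8; 128];
	let balance = [0xffu8; 32]; // e.g. the bytes of `as_bytes(self.ext.balance())`
	write_fixed_output(&mut guest_memory, 64, &balance).unwrap();
	assert_eq!(&guest_memory[64..96], &balance[..]);
}
```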
@@ -1557,50 +1566,47 @@ pub mod env { &mut self, memory: &mut M, topics_ptr: u32, - topics_len: u32, + num_topic: u32, data_ptr: u32, data_len: u32, ) -> Result<(), TrapReason> { - let num_topic = topics_len - .checked_div(core::mem::size_of::>() as u32) - .ok_or("Zero sized topics are not allowed")?; self.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?; + + if num_topic > limits::NUM_EVENT_TOPICS { + return Err(Error::::TooManyTopics.into()); + } + if data_len > self.ext.max_value_size() { return Err(Error::::ValueTooLarge.into()); } - let topics: Vec::T>> = match topics_len { + let topics: Vec = match num_topic { 0 => Vec::new(), - _ => memory.read_as_unbounded(topics_ptr, topics_len)?, + _ => { + let mut v = Vec::with_capacity(num_topic as usize); + let topics_len = num_topic * H256::len_bytes() as u32; + let buf = memory.read(topics_ptr, topics_len)?; + for chunk in buf.chunks_exact(H256::len_bytes()) { + v.push(H256::from_slice(chunk)); + } + v + }, }; - // If there are more than `event_topics`, then trap. - if topics.len() as u32 > limits::NUM_EVENT_TOPICS { - return Err(Error::::TooManyTopics.into()); - } - let event_data = memory.read(data_ptr, data_len)?; - self.ext.deposit_event(topics, event_data); - Ok(()) } /// Stores the current block number of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::block_number`]. #[api_version(0)] - fn block_number( - &mut self, - memory: &mut M, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + fn block_number(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::BlockNumber)?; - Ok(self.write_sandbox_output( + Ok(self.write_fixed_sandbox_output( memory, out_ptr, - out_len_ptr, - &self.ext.block_number().encode(), + &as_bytes(self.ext.block_number()), false, already_charged, )?) 
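`deposit_event` above also changes its wire format: instead of a SCALE-encoded vector of topics described by a byte length, the guest now passes `num_topic` plus a packed buffer of exactly `num_topic * 32` bytes, which the host slices into `H256` values with `chunks_exact`. A standalone sketch of that packing follows; the helper names are illustrative, not part of the pallet:

```rust
use sp_core::H256;

// Guest side: topics are concatenated as fixed 32-byte words, no SCALE framing.
fn encode_topics(topics: &[H256]) -> Vec<u8> {
	let mut buf = Vec::with_capacity(topics.len() * H256::len_bytes());
	for topic in topics {
		buf.extend_from_slice(topic.as_bytes());
	}
	buf
}

// Host side: mirrors the decode loop above; `num_topic` only sizes the
// allocation, the iteration is driven by the 32-byte chunks themselves.
fn decode_topics(num_topic: u32, buf: &[u8]) -> Vec<H256> {
	let mut topics = Vec::with_capacity(num_topic as usize);
	for chunk in buf.chunks_exact(H256::len_bytes()) {
		topics.push(H256::from_slice(chunk));
	}
	topics
}

fn main() {
	let topics = vec![H256::repeat_byte(1), H256::repeat_byte(2)];
	let buf = encode_topics(&topics);
	assert_eq!(decode_topics(topics.len() as u32, &buf), topics);
}
```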
@@ -1884,7 +1890,7 @@ pub mod env { code_hash_ptr: u32, ) -> Result { self.charge_gas(RuntimeCosts::SetCodeHash)?; - let code_hash: H256 = memory.read_as(code_hash_ptr)?; + let code_hash: H256 = memory.read_h256(code_hash_ptr)?; match self.ext.set_code_hash(code_hash) { Err(err) => { let code = Self::err_into_return_code(err)?; @@ -1926,7 +1932,7 @@ pub mod env { code_hash_ptr: u32, ) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::LockDelegateDependency)?; - let code_hash = memory.read_as(code_hash_ptr)?; + let code_hash = memory.read_h256(code_hash_ptr)?; self.ext.lock_delegate_dependency(code_hash)?; Ok(()) } @@ -1941,7 +1947,7 @@ pub mod env { code_hash_ptr: u32, ) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::UnlockDelegateDependency)?; - let code_hash = memory.read_as(code_hash_ptr)?; + let code_hash = memory.read_h256(code_hash_ptr)?; self.ext.unlock_delegate_dependency(&code_hash)?; Ok(()) } diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs index 7974cc1260e4..8913592c13bb 100644 --- a/substrate/frame/revive/src/weights.rs +++ b/substrate/frame/revive/src/weights.rs @@ -51,19 +51,6 @@ use core::marker::PhantomData; pub trait WeightInfo { fn on_process_deletion_queue_batch() -> Weight; fn on_initialize_per_trie_key(k: u32, ) -> Weight; - fn v9_migration_step(c: u32, ) -> Weight; - fn v10_migration_step() -> Weight; - fn v11_migration_step(k: u32, ) -> Weight; - fn v12_migration_step(c: u32, ) -> Weight; - fn v13_migration_step() -> Weight; - fn v14_migration_step() -> Weight; - fn v15_migration_step() -> Weight; - fn v16_migration_step() -> Weight; - fn migration_noop() -> Weight; - fn migrate() -> Weight; - fn on_runtime_upgrade_noop() -> Weight; - fn on_runtime_upgrade_in_progress() -> Weight; - fn on_runtime_upgrade() -> Weight; fn call_with_code_per_byte(c: u32, ) -> Weight; fn instantiate_with_code(c: u32, i: u32) -> Weight; fn instantiate(i: u32) -> Weight; @@ -162,182 +149,6 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// The range of component `c` is `[0, 125952]`. - fn v9_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `211 + c * (1 ±0)` - // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_783_000 picoseconds. - Weight::from_parts(4_462_075, 6149) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v10_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `510` - // Estimated: `6450` - // Minimum execution time: 15_971_000 picoseconds. 
- Weight::from_parts(16_730_000, 6450) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::DeletionQueue` (r:1 w:1025) - /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) - /// Storage: `Contracts::DeletionQueueCounter` (r:0 w:1) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// The range of component `k` is `[0, 1024]`. - fn v11_migration_step(k: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `171 + k * (1 ±0)` - // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_149_000 picoseconds. - Weight::from_parts(3_264_000, 3635) - // Standard Error: 559 - .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(k.into())) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn v12_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `325 + c * (1 ±0)` - // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 15_072_000 picoseconds. - Weight::from_parts(15_721_891, 6263) - // Standard Error: 2 - .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v13_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `440` - // Estimated: `6380` - // Minimum execution time: 12_047_000 picoseconds. - Weight::from_parts(12_500_000, 6380) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - fn v14_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `352` - // Estimated: `6292` - // Minimum execution time: 47_488_000 picoseconds. 
- Weight::from_parts(48_482_000, 6292) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v15_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `594` - // Estimated: `6534` - // Minimum execution time: 52_801_000 picoseconds. - Weight::from_parts(54_230_000, 6534) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v16_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `409` - // Estimated: `6349` - // Minimum execution time: 11_618_000 picoseconds. - Weight::from_parts(12_068_000, 6349) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn migration_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 2_131_000 picoseconds. - Weight::from_parts(2_255_000, 1627) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - fn migrate() -> Weight { - // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `3631` - // Minimum execution time: 10_773_000 picoseconds. - Weight::from_parts(11_118_000, 3631) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - fn on_runtime_upgrade_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_624_000, 3607) - .saturating_add(T::DbWeight::get().reads(1_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade_in_progress() -> Weight { - // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `3632` - // Minimum execution time: 5_612_000 picoseconds. 
- Weight::from_parts(5_838_000, 3632) - .saturating_add(T::DbWeight::get().reads(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 5_487_000 picoseconds. - Weight::from_parts(5_693_000, 3607) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -1152,182 +963,6 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:2 w:1) - /// The range of component `c` is `[0, 125952]`. - fn v9_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `211 + c * (1 ±0)` - // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_783_000 picoseconds. - Weight::from_parts(4_462_075, 6149) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v10_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `510` - // Estimated: `6450` - // Minimum execution time: 15_971_000 picoseconds. - Weight::from_parts(16_730_000, 6450) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::DeletionQueue` (r:1 w:1025) - /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) - /// Storage: `Contracts::DeletionQueueCounter` (r:0 w:1) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// The range of component `k` is `[0, 1024]`. - fn v11_migration_step(k: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `171 + k * (1 ±0)` - // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_149_000 picoseconds. 
- Weight::from_parts(3_264_000, 3635) - // Standard Error: 559 - .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(k.into())) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn v12_migration_step(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `325 + c * (1 ±0)` - // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 15_072_000 picoseconds. - Weight::from_parts(15_721_891, 6263) - // Standard Error: 2 - .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v13_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `440` - // Estimated: `6380` - // Minimum execution time: 12_047_000 picoseconds. - Weight::from_parts(12_500_000, 6380) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - fn v14_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `352` - // Estimated: `6292` - // Minimum execution time: 47_488_000 picoseconds. - Weight::from_parts(48_482_000, 6292) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - fn v15_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `594` - // Estimated: `6534` - // Minimum execution time: 52_801_000 picoseconds. 
- Weight::from_parts(54_230_000, 6534) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - fn v16_migration_step() -> Weight { - // Proof Size summary in bytes: - // Measured: `409` - // Estimated: `6349` - // Minimum execution time: 11_618_000 picoseconds. - Weight::from_parts(12_068_000, 6349) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn migration_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 2_131_000 picoseconds. - Weight::from_parts(2_255_000, 1627) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:1) - fn migrate() -> Weight { - // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `3631` - // Minimum execution time: 10_773_000 picoseconds. - Weight::from_parts(11_118_000, 3631) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - fn on_runtime_upgrade_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_624_000, 3607) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade_in_progress() -> Weight { - // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `3632` - // Minimum execution time: 5_612_000 picoseconds. - Weight::from_parts(5_838_000, 3632) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - } - /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) - /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - fn on_runtime_upgrade() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 5_487_000 picoseconds. 
- Weight::from_parts(5_693_000, 3607) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index f52ea9574025..101ae9aca465 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -58,21 +58,17 @@ pub trait HostFn: private::Sealed { /// Stores the *free* balance of the current account into the supplied buffer. /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the balance. - fn balance(output: &mut &mut [u8]); + fn balance(output: &mut [u8; 32]); /// Stores the current block number of the current contract into the supplied buffer. /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the block number. - fn block_number(output: &mut &mut [u8]); + fn block_number(output: &mut [u8; 32]); /// Call (possibly transferring some amount of funds) into the specified account. /// @@ -83,11 +79,10 @@ pub trait HostFn: private::Sealed { /// otherwise. /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a - /// `Option`. Traps otherwise. Passing `None` means setting no specific limit for - /// the call, which implies storage usage up to the limit of the parent call. - /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. - /// Traps otherwise. + /// - `deposit`: The storage deposit limit for instantiation. Passing `None` means setting no + /// specific limit for the call, which implies storage usage up to the limit of the parent + /// call. + /// - `value`: The value to transfer into the contract. /// - `input`: The input data buffer used to call the contract. /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` /// is provided then the output buffer is not copied. @@ -106,8 +101,8 @@ pub trait HostFn: private::Sealed { callee: &[u8; 20], ref_time_limit: u64, proof_size_limit: u64, - deposit: Option<&[u8]>, - value: &[u8], + deposit: Option<&[u8; 32]>, + value: &[u8; 32], input_data: &[u8], output: Option<&mut &mut [u8]>, ) -> Result; @@ -287,8 +282,8 @@ pub trait HostFn: private::Sealed { /// /// # Parameters /// - /// - `topics`: The topics list encoded as `Vec`. It can't contain duplicates. - fn deposit_event(topics: &[u8], data: &[u8]); + /// - `topics`: The topics list. It can't contain duplicates. + fn deposit_event(topics: &[[u8; 32]], data: &[u8]); /// Recovers the ECDSA public key from the given message hash and signature. /// @@ -374,11 +369,10 @@ pub trait HostFn: private::Sealed { /// - `code_hash`: The hash of the code to be instantiated. /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. 
- /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a - /// `Option`. Traps otherwise. Passing `None` means setting no specific limit for - /// the call, which implies storage usage up to the limit of the parent call. - /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. - /// Traps otherwise. + /// - `deposit`: The storage deposit limit for instantiation. Passing `None` means setting no + /// specific limit for the call, which implies storage usage up to the limit of the parent + /// call. + /// - `value`: The value to transfer into the contract. /// - `input`: The input data buffer. /// - `address`: A reference to the address buffer to write the address of the contract. If /// `None` is provided then the output buffer is not copied. @@ -402,8 +396,8 @@ pub trait HostFn: private::Sealed { code_hash: &[u8; 32], ref_time_limit: u64, proof_size_limit: u64, - deposit: Option<&[u8]>, - value: &[u8], + deposit: Option<&[u8; 32]>, + value: &[u8; 32], input: &[u8], address: Option<&mut [u8; 20]>, output: Option<&mut &mut [u8]>, @@ -422,14 +416,11 @@ pub trait HostFn: private::Sealed { fn is_contract(address: &[u8; 20]) -> bool; /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// The data is encoded as `T::Balance`. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the minimum balance. - fn minimum_balance(output: &mut &mut [u8]); + fn minimum_balance(output: &mut [u8; 32]); /// Retrieve the code hash of the currently executing contract. /// @@ -440,12 +431,10 @@ pub trait HostFn: private::Sealed { /// Load the latest block timestamp into the supplied buffer /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the timestamp. - fn now(output: &mut &mut [u8]); + fn now(output: &mut [u8; 32]); /// Removes the delegate dependency from the contract. /// @@ -548,12 +537,12 @@ pub trait HostFn: private::Sealed { /// # Parameters /// /// - `address`: The address of the account to transfer funds to. - /// - `value`: The value to transfer. Should be decodable as a `T::Balance`. Traps otherwise. + /// - `value`: The U256 value to transfer. /// /// # Errors /// /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - fn transfer(address: &[u8; 20], value: &[u8]) -> Result; + fn transfer(address: &[u8; 20], value: &[u8; 32]) -> Result; /// Remove the calling account and transfer remaining **free** balance. /// @@ -573,26 +562,20 @@ pub trait HostFn: private::Sealed { fn terminate(beneficiary: &[u8; 20]) -> !; /// Stores the value transferred along with this call/instantiate into the supplied buffer. - /// The data is encoded as `T::Balance`. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the transferred value. - fn value_transferred(output: &mut &mut [u8]); + fn value_transferred(output: &mut [u8; 32]); /// Stores the price for the specified amount of gas into the supplied buffer. - /// The data is encoded as `T::Balance`. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. 
/// /// # Parameters /// /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. /// - `output`: A reference to the output data buffer to write the price. - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]); + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); /// Execute an XCM program locally, using the contract's address as the origin. /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv32.rs index c8218bb8f737..b7b660c40837 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv32.rs @@ -79,24 +79,19 @@ mod sys { pub fn caller_is_origin() -> ReturnCode; pub fn caller_is_root() -> ReturnCode; pub fn address(out_ptr: *mut u8); - pub fn weight_to_fee( - ref_time: u64, - proof_size: u64, - out_ptr: *mut u8, - out_len_ptr: *mut u32, - ); + pub fn weight_to_fee(ref_time: u64, proof_size: u64, out_ptr: *mut u8); pub fn weight_left(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn balance(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn value_transferred(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn now(out_ptr: *mut u8, out_len_ptr: *mut u32); - pub fn minimum_balance(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn balance(out_ptr: *mut u8); + pub fn value_transferred(out_ptr: *mut u8); + pub fn now(out_ptr: *mut u8); + pub fn minimum_balance(out_ptr: *mut u8); pub fn deposit_event( - topics_ptr: *const u8, - topics_len: u32, + topics_ptr: *const [u8; 32], + num_topic: u32, data_ptr: *const u8, data_len: u32, ); - pub fn block_number(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn block_number(out_ptr: *mut u8); pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_keccak_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_blake2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); @@ -136,21 +131,20 @@ mod sys { } } +/// A macro to implement all Host functions with a signature of `fn(&mut [u8; n])`. macro_rules! impl_wrapper_for { - ( $( $name:ident, )* ) => { - $( - fn $name(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - unsafe { - sys::$name( - output.as_mut_ptr(), - &mut output_len, - ) - } - extract_from_slice(output, output_len as usize) - } - )* - } + (@impl_fn $name:ident, $n: literal) => { + fn $name(output: &mut [u8; $n]) { + unsafe { sys::$name(output.as_mut_ptr()) } + } + }; + + () => {}; + + ([u8; $n: literal] => $($name:ident),*; $($tail:tt)*) => { + $(impl_wrapper_for!(@impl_fn $name, $n);)* + impl_wrapper_for!($($tail)*); + }; } macro_rules! 
impl_hash_fn { @@ -185,7 +179,7 @@ fn ptr_len_or_sentinel(data: &mut Option<&mut &mut [u8]>) -> (*mut u8, u32) { } #[inline(always)] -fn ptr_or_sentinel(data: &Option<&[u8]>) -> *const u8 { +fn ptr_or_sentinel(data: &Option<&[u8; 32]>) -> *const u8 { match data { Some(ref data) => data.as_ptr(), None => crate::SENTINEL as _, @@ -197,8 +191,8 @@ impl HostFn for HostFnImpl { code_hash: &[u8; 32], ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: Option<&[u8]>, - value: &[u8], + deposit_limit: Option<&[u8; 32]>, + value: &[u8; 32], input: &[u8], mut address: Option<&mut [u8; 20]>, mut output: Option<&mut &mut [u8]>, @@ -253,8 +247,8 @@ impl HostFn for HostFnImpl { callee: &[u8; 20], ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: Option<&[u8]>, - value: &[u8], + deposit_limit: Option<&[u8; 32]>, + value: &[u8; 32], input: &[u8], mut output: Option<&mut &mut [u8]>, ) -> Result { @@ -327,12 +321,12 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn transfer(address: &[u8; 20], value: &[u8]) -> Result { + fn transfer(address: &[u8; 20], value: &[u8; 32]) -> Result { let ret_code = unsafe { sys::transfer(address.as_ptr(), value.as_ptr()) }; ret_code.into() } - fn deposit_event(topics: &[u8], data: &[u8]) { + fn deposit_event(topics: &[[u8; 32]], data: &[u8]) { unsafe { sys::deposit_event( topics.as_ptr(), @@ -449,33 +443,19 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn address(output: &mut [u8; 20]) { - unsafe { sys::address(output.as_mut_ptr()) } - } - - fn caller(output: &mut [u8; 20]) { - unsafe { sys::caller(output.as_mut_ptr()) } - } - impl_wrapper_for! { - block_number, balance, - value_transferred,now, minimum_balance, - weight_left, + [u8; 32] => block_number, balance, value_transferred, now, minimum_balance; + [u8; 20] => address, caller; } - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]) { + fn weight_left(output: &mut &mut [u8]) { let mut output_len = output.len() as u32; - { - unsafe { - sys::weight_to_fee( - ref_time_limit, - proof_size_limit, - output.as_mut_ptr(), - &mut output_len, - ) - }; - } - extract_from_slice(output, output_len as usize); + unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } + extract_from_slice(output, output_len as usize) + } + + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { + unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; } impl_hash_fn!(sha2_256, 32); From 083f5273fa9d03b2188c0c41aa21f8b45c59a733 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 07:05:54 +0000 Subject: [PATCH 30/66] Bump the known_good_semver group with 3 updates (#5636) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps the known_good_semver group with 3 updates: [serde](https://github.com/serde-rs/serde), [serde_derive](https://github.com/serde-rs/serde) and [serde_json](https://github.com/serde-rs/json). Updates `serde` from 1.0.209 to 1.0.210
Release notes

Sourced from serde's releases.

v1.0.210

  • Support serializing and deserializing IpAddr and SocketAddr in no-std mode on Rust 1.77+ (#2816, thanks @​MathiasKoch)
  • Make serde::ser::StdError and serde::de::StdError equivalent to core::error::Error on Rust 1.81+ (#2818)
Commits
  • 89c4b02 Release 1.0.210
  • eeb8e44 Merge pull request #2818 from dtolnay/coreerror
  • 785c2d9 Stabilize no-std StdError trait
  • d549f04 Reformat parse_ip_impl definition and calls
  • 4c0dd63 Delete attr support from core::net deserialization macros
  • 26fb134 Relocate cfg attrs out of parse_ip_impl and parse_socket_impl
  • 07e614b Merge pull request #2817 from dtolnay/corenet
  • b1f899f Delete doc(cfg) attribute from impls that are supported in no-std
  • b4f860e Merge pull request #2816 from MathiasKoch/chore/core-net
  • d940fe1 Reuse existing Buf wrapper as replacement for std::io::Write
  • Additional commits viewable in compare view

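Not part of the patch, but to make the first release-note bullet above concrete: a minimal sketch of round-tripping the `core::net` types through serde. It assumes `serde = { version = "1.0.210", features = ["derive"] }` and `serde_json` as dependencies, and the `Peer` struct is made up for illustration. On `std` builds this already worked; 1.0.210 extends it to no-std builds on Rust 1.77+.

```rust
// Round-trip std::net (re-exported core::net) types through serde.
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Peer {
    ip: IpAddr,
    addr: SocketAddr,
}

fn main() -> Result<(), serde_json::Error> {
    let peer = Peer {
        ip: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
        addr: "127.0.0.1:30333".parse().expect("valid socket address"),
    };
    // Both types serialize as their Display strings in human-readable formats.
    let json = serde_json::to_string(&peer)?;
    assert_eq!(json, r#"{"ip":"127.0.0.1","addr":"127.0.0.1:30333"}"#);
    assert_eq!(serde_json::from_str::<Peer>(&json)?, peer);
    Ok(())
}
```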
Updates `serde_derive` from 1.0.209 to 1.0.210
Release notes

Sourced from serde_derive's releases.

v1.0.210

  • Support serializing and deserializing IpAddr and SocketAddr in no-std mode on Rust 1.77+ (#2816, thanks @​MathiasKoch)
  • Make serde::ser::StdError and serde::de::StdError equivalent to core::error::Error on Rust 1.81+ (#2818)
Commits
  • 89c4b02 Release 1.0.210
  • eeb8e44 Merge pull request #2818 from dtolnay/coreerror
  • 785c2d9 Stabilize no-std StdError trait
  • d549f04 Reformat parse_ip_impl definition and calls
  • 4c0dd63 Delete attr support from core::net deserialization macros
  • 26fb134 Relocate cfg attrs out of parse_ip_impl and parse_socket_impl
  • 07e614b Merge pull request #2817 from dtolnay/corenet
  • b1f899f Delete doc(cfg) attribute from impls that are supported in no-std
  • b4f860e Merge pull request #2816 from MathiasKoch/chore/core-net
  • d940fe1 Reuse existing Buf wrapper as replacement for std::io::Write
  • Additional commits viewable in compare view

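The second bullet (the `StdError` unification) matters for no-std crates that define their own serializer error types. A hedged sketch, not from this repository: with Rust 1.81+ and serde 1.0.210 built with `default-features = false` (and the `alloc` crate available), implementing `core::error::Error` is enough to satisfy the `StdError` supertrait of `serde::ser::Error`. The `MyError` type below is hypothetical.

```rust
#![no_std]
extern crate alloc;

use alloc::string::{String, ToString};
use core::fmt;

#[derive(Debug)]
struct MyError(String);

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.0)
    }
}

// On Rust 1.81+, serde's `StdError` supertrait is `core::error::Error`,
// so this empty impl is all a no-std build needs.
impl core::error::Error for MyError {}

impl serde::ser::Error for MyError {
    fn custom<T: fmt::Display>(msg: T) -> Self {
        MyError(msg.to_string())
    }
}
```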
Updates `serde_json` from 1.0.127 to 1.0.128
Release notes

Sourced from serde_json's releases.

1.0.128

  • Support serializing maps containing 128-bit integer keys to serde_json::Value (#1188, thanks @​Mrreadiness)
Commits
  • d96b1d9 Release 1.0.128
  • 599228d Merge pull request #1188 from Mrreadiness/feat/add-hashmap-key-128-serializer
  • 5416cee feat: add support for 128 bit HashMap key serialization
  • 27a4ca9 Upload CI Cargo.lock for reproducing failures
  • See full diff in compare view

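A quick sketch of what that bullet enables (illustrative values, assuming `serde_json = "1.0.128"` as a dependency): 128-bit integer keys are stringified when the map is converted to a `Value`, where older releases failed with an error because JSON object keys must be strings.

```rust
use std::collections::BTreeMap;

fn main() -> Result<(), serde_json::Error> {
    let mut balances: BTreeMap<u128, u64> = BTreeMap::new();
    balances.insert(u128::MAX, 1);

    // This conversion errored before serde_json 1.0.128.
    let value = serde_json::to_value(&balances)?;
    assert_eq!(
        value,
        serde_json::json!({ "340282366920938463463374607431768211455": 1 })
    );
    Ok(())
}
```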
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock | 12 ++++++------
 Cargo.toml | 4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 508fc2e89221..e8510a4c6a5b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -19104,9 +19104,9 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5"

 [[package]]
 name = "serde"
-version = "1.0.209"
+version = "1.0.210"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
 dependencies = [
  "serde_derive",
 ]
@@ -19131,9 +19131,9 @@ dependencies = [

 [[package]]
 name = "serde_derive"
-version = "1.0.209"
+version = "1.0.210"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
 dependencies = [
  "proc-macro2 1.0.82",
  "quote 1.0.37",
  "syn 2.0.65",
 ]
@@ -19162,9 +19162,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.127"
+version = "1.0.128"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
+checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
 dependencies = [
  "indexmap 2.2.3",
  "itoa",
diff --git a/Cargo.toml b/Cargo.toml
index 6bd401d8e15f..50c7225c749e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1185,10 +1185,10 @@ secp256k1 = { version = "0.28.0", default-features = false }
 secrecy = { version = "0.8.0", default-features = false }
 seedling-runtime = { path = "cumulus/parachains/runtimes/starters/seedling" }
 separator = { version = "0.4.1" }
-serde = { version = "1.0.209", default-features = false }
+serde = { version = "1.0.210", default-features = false }
 serde-big-array = { version = "0.3.2" }
 serde_derive = { version = "1.0.117" }
-serde_json = { version = "1.0.127", default-features = false }
+serde_json = { version = "1.0.128", default-features = false }
 serde_yaml = { version = "0.9" }
 serial_test = { version = "2.0.0" }
 sha1 = { version = "0.10.6" }

From def35b92d120a78869cc2e06d34d575eea80faff Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Sep 2024 07:06:51 +0000
Subject: [PATCH 31/66] Bump lazy_static from 1.4.0 to 1.5.0 (#5639)

Bumps [lazy_static](https://github.com/rust-lang-nursery/lazy-static.rs) from 1.4.0 to 1.5.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=lazy_static&package-manager=cargo&previous-version=1.4.0&new-version=1.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
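For context (not from the PR): lazy_static provides the `lazy_static!` macro for statics that are initialized lazily on first access. The 1.4.0 to 1.5.0 bump is a semver-compatible minor release, so common usage like the following sketch keeps compiling unchanged.

```rust
use lazy_static::lazy_static;
use std::collections::HashMap;

lazy_static! {
    // Built once, on first dereference, then shared for the program's lifetime.
    static ref COUNTRIES: HashMap<&'static str, &'static str> = {
        let mut m = HashMap::new();
        m.insert("CH", "Switzerland");
        m
    };
}

fn main() {
    assert_eq!(COUNTRIES["CH"], "Switzerland");
}
```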
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock | 4 ++--
 Cargo.toml | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index e8510a4c6a5b..f6071a97915f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7729,9 +7729,9 @@ dependencies = [

 [[package]]
 name = "lazy_static"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"

 [[package]]
 name = "lazycell"
diff --git a/Cargo.toml b/Cargo.toml
index 50c7225c749e..174811663deb 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -822,7 +822,7 @@ kvdb-memorydb = { version = "0.13.0" }
 kvdb-rocksdb = { version = "0.19.0" }
 kvdb-shared-tests = { version = "0.11.0" }
 landlock = { version = "0.3.0" }
-lazy_static = { version = "1.4.0" }
+lazy_static = { version = "1.5.0" }
 libc = { version = "0.2.155" }
 libfuzzer-sys = { version = "0.4" }
 libp2p = { version = "0.52.4" }

From f5783cc68b53b06decb97cdddbfb511280ffdc45 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Sep 2024 07:07:23 +0000
Subject: [PATCH 32/66] Bump proc-macro2 from 1.0.82 to 1.0.86 (#5638)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.82 to 1.0.86.
Release notes

Sourced from proc-macro2's releases.

1.0.86

  • Documentation improvements

1.0.85

  • Mark some tests as only for 64-bit targets (#463)

1.0.84

1.0.83

  • Optimize the representation of Ident (#462)
Commits
  • aa9476b Release 1.0.86
  • 1961358 Merge pull request #466 from dtolnay/buildrs
  • e1bd2cc Bring build script comments up to date
  • 5b27127 Merge pull request #465 from dtolnay/ignorereason
  • 0da4629 Fill in ignore reasons in all #[ignore] attributes
  • 5ee1cab Release 1.0.85
  • aa64c20 Merge pull request #464 from dtolnay/testsize
  • bc9f4d9 Ignore size tests on non-64bit target
  • 1160ec3 Make size tests #[ignore] in cfg(randomize_layout)
  • 33c9578 Release 1.0.84
  • Additional commits viewable in compare view

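None of the changes listed above touch proc-macro2's public API; 1.0.83's change only shrinks the internal representation of `Ident`. So a hedged sketch like the one below (illustrative only, assuming `proc-macro2 = "1.0.86"` and `quote = "1.0"` as dependencies) compiles identically before and after the bump.

```rust
use proc_macro2::{Ident, Span, TokenStream};
use quote::quote;

// Generate a getter method for a hypothetical string field.
fn getter_for(field: &str) -> TokenStream {
    // `Ident::new` panics on strings that are not valid identifiers,
    // e.g. "1x" or the empty string.
    let name = Ident::new(field, Span::call_site());
    quote! {
        pub fn #name(&self) -> &str {
            &self.#name
        }
    }
}

fn main() {
    println!("{}", getter_for("payload"));
}
```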
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=proc-macro2&package-manager=cargo&previous-version=1.0.82&new-version=1.0.86)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 220 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 111 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6071a97915f..e1b0b3f8f9e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -157,7 +157,7 @@ dependencies = [ "dunce", "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", "syn-solidity", @@ -284,7 +284,7 @@ dependencies = [ "include_dir", "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -518,7 +518,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -620,7 +620,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -724,7 +724,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", "synstructure 0.12.6", @@ -736,7 +736,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", "synstructure 0.13.1", @@ -748,7 +748,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -759,7 +759,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -1290,7 +1290,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -1307,7 +1307,7 @@ version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -1366,7 +1366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -1503,7 +1503,7 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease 0.2.12", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "regex", "rustc-hash 1.1.0", @@ -2846,7 +2846,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck 0.4.1", 
"proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -2858,7 +2858,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -3046,7 +3046,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b" dependencies = [ "nom", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -4169,7 +4169,7 @@ name = "cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -4705,7 +4705,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -4744,7 +4744,7 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "scratch", "syn 2.0.65", @@ -4762,7 +4762,7 @@ version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -4868,7 +4868,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -4879,7 +4879,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -4890,7 +4890,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -4902,7 +4902,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "rustc_version 0.4.0", "syn 1.0.109", @@ -4998,7 +4998,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -5058,7 +5058,7 @@ dependencies = [ "common-path", "derive-syn-parse", "once_cell", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "regex", "syn 2.0.65", @@ -5107,7 +5107,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -5255,7 +5255,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -5267,7 +5267,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -5287,7 +5287,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -5298,7 +5298,7 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -5501,7 +5501,7 @@ dependencies = [ "file-guard", "fs-err", "prettyplease 0.2.12", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -5573,7 +5573,7 @@ dependencies = [ "expander", "indexmap 2.2.3", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -5905,7 +5905,7 @@ dependencies = [ "frame-support", "parity-scale-codec", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "scale-info", "sp-arithmetic", @@ -6100,7 +6100,7 @@ dependencies = [ "parity-scale-codec", "pretty_assertions", "proc-macro-warning 1.0.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "regex", "scale-info", @@ -6119,7 +6119,7 @@ version = "10.0.0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -6128,7 +6128,7 @@ dependencies = [ name = "frame-support-procedural-tools-derive" version = "11.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -6379,7 +6379,7 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -7172,7 +7172,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -7192,7 +7192,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", ] @@ -7551,7 +7551,7 @@ checksum = "6b07a2daf52077ab1b197aea69a5c990c060143835bf04c77070e98903791715" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -8137,7 +8137,7 @@ checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ "heck 0.4.1", "proc-macro-warning 0.4.2", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -8565,7 +8565,7 @@ 
dependencies = [ "const-random", "derive-syn-parse", "macro_magic_core_macros", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -8576,7 +8576,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -8921,7 +8921,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -8933,7 +8933,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ "cfg-if", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -9045,7 +9045,7 @@ checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", "synstructure 0.12.6", @@ -9093,7 +9093,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -9484,7 +9484,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -9660,7 +9660,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -9727,7 +9727,7 @@ dependencies = [ "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -10442,7 +10442,7 @@ dependencies = [ name = "pallet-contracts-proc-macro" version = "18.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -11545,7 +11545,7 @@ dependencies = [ name = "pallet-revive-proc-macro" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -11789,7 +11789,7 @@ name = "pallet-staking-reward-curve" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "sp-runtime", "syn 2.0.65", @@ -12435,7 +12435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -12464,7 +12464,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "syn 1.0.109", "synstructure 0.12.6", ] @@ -12884,7 +12884,7 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" 
dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -12925,7 +12925,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15342,7 +15342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common 0.9.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15354,7 +15354,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7855353a5a783dd5d09e3b915474bddf66575f5a3cf45dec8d1c5e051ba320dc" dependencies = [ "polkavm-common 0.10.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15592,7 +15592,7 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "syn 1.0.109", ] @@ -15602,7 +15602,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "syn 2.0.65", ] @@ -15663,7 +15663,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", "version_check", @@ -15675,7 +15675,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "version_check", ] @@ -15692,7 +15692,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15703,7 +15703,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15719,9 +15719,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -15784,7 +15784,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -15892,7 +15892,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ 
-15905,7 +15905,7 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.11.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -16145,7 +16145,7 @@ version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", ] [[package]] @@ -16344,7 +16344,7 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -16894,7 +16894,7 @@ checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "regex", "relative-path", @@ -17388,7 +17388,7 @@ name = "sc-chain-spec-derive" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -18683,7 +18683,7 @@ name = "sc-tracing-proc-macro" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -18797,7 +18797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -18835,7 +18835,7 @@ version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "serde_derive_internals", "syn 1.0.109", @@ -19135,7 +19135,7 @@ version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -19146,7 +19146,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -19237,7 +19237,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20112,7 +20112,7 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20515,7 +20515,7 @@ name = "sp-debug-derive" version = "8.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20524,7 +20524,7 @@ dependencies = [ name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20797,7 +20797,7 @@ source = 
"git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "Inflector", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -20809,7 +20809,7 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -21070,7 +21070,7 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "sp-version", "syn 2.0.65", @@ -21155,7 +21155,7 @@ checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "serde", "serde_json", @@ -21180,7 +21180,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -21375,7 +21375,7 @@ checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases", "memchr", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -21448,7 +21448,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -21484,7 +21484,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", "syn 1.0.109", @@ -21497,7 +21497,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", "syn 2.0.65", @@ -21510,7 +21510,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", "syn 2.0.65", @@ -21961,7 +21961,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "unicode-ident", ] @@ -21972,7 +21972,7 @@ version = "2.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "unicode-ident", ] @@ -21984,7 +21984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b837ef12ab88835251726eb12237655e61ec8dc8a280085d1961cdc3dfd047" dependencies = [ "paste", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -21995,7 +21995,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 
1.0.109", "unicode-xid 0.2.4", @@ -22007,7 +22007,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22126,7 +22126,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22290,7 +22290,7 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 1.0.109", ] @@ -22301,7 +22301,7 @@ version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22467,7 +22467,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22685,7 +22685,7 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -22727,7 +22727,7 @@ dependencies = [ "assert_matches", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -23298,7 +23298,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", "wasm-bindgen-shared", @@ -23332,7 +23332,7 @@ version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", "wasm-bindgen-backend", @@ -23365,7 +23365,7 @@ version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", ] @@ -24440,7 +24440,7 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "staging-xcm", "syn 2.0.65", @@ -24606,7 +24606,7 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] @@ -24626,7 +24626,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.65", ] diff --git a/Cargo.toml b/Cargo.toml index 174811663deb..e2611982a985 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1079,7 +1079,7 @@ 
pretty_assertions = { version = "1.3.0" } primitive-types = { version = "0.12.1", default-features = false } proc-macro-crate = { version = "3.0.0" } proc-macro-warning = { version = "1.0.0", default-features = false } -proc-macro2 = { version = "1.0.64" } +proc-macro2 = { version = "1.0.86" } procfs = { version = "0.16.0" } prometheus = { version = "0.13.0", default-features = false } prometheus-endpoint = { path = "substrate/utils/prometheus", default-features = false, package = "substrate-prometheus-endpoint" } From a3eda0a1bdc67fc14efbf64c1f0046d452f55e31 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 9 Sep 2024 10:34:53 +0200 Subject: [PATCH 33/66] [CI] Remove duplicate jobs (#5602) All of these things are already tested in Github Actions: https://github.com/paritytech/polkadot-sdk/blob/b3c2a25b73bb4854f26204068f0aec3e8577196c/.github/workflows/checks-quick.yml#L50 Q: the `job-starter` seems to be used by tests, so am keeping it, but not sure how useful it is. --------- Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: alvicsam --- .github/workflows/checks-quick.yml | 26 ++++++++++++++++++++ .gitlab/pipeline/check.yml | 39 ------------------------------ 2 files changed, 26 insertions(+), 39 deletions(-) diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index ee5ac31e9caa..96f214e9427b 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -181,3 +181,29 @@ jobs: env: ASSERT_REGEX: "FAIL-CI" GIT_DEPTH: 1 + + confirm-required-checks-quick-jobs-passed: + runs-on: ubuntu-latest + name: All quick checks passed + # If any new job gets added, be sure to add it to this array + needs: + - fmt + - check-dependency-rules + - check-rust-feature-propagation + - test-rust-features + - check-toml-format + - check-workspace + - check-markdown + - check-umbrella + - check-fail-ci + if: always() && !cancelled() + steps: + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 2212c1aeb0a8..7d1f37dddd51 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -1,25 +1,3 @@ -# from substrate -# not sure if it's needed in monorepo -check-dependency-rules: - stage: check - extends: - - .kubernetes-env - - .test-refs-no-trigger-prs-only - variables: - CI_IMAGE: "paritytech/tools:latest" - allow_failure: true - script: - - cd substrate/ - - ../.gitlab/ensure-deps.sh - -test-rust-features: - stage: check - extends: - - .kubernetes-env - - .test-refs-no-trigger-prs-only - script: - - bash .gitlab/rust-features.sh . 
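For context on the `confirm-required-checks-quick-jobs-passed` gate added to `checks-quick.yml` above: `toJSON(needs)` pretty-prints one object per dependency job, so each finished job contributes exactly one `"result"` line for the `grep`/`wc -l` pipeline to count. A rough sketch of that dump (job names abbreviated; this is the standard GitHub Actions `needs` context shape, not text from the patch):

```json
{
  "fmt": { "result": "success", "outputs": {} },
  "check-workspace": { "result": "skipped", "outputs": {} },
  "check-umbrella": { "result": "failure", "outputs": {} }
}
```

A non-zero count of `"result": "failure"` lines fails the gate and writes the failure summary to `$GITHUB_STEP_SUMMARY`.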
- job-starter: stage: check image: paritytech/tools:latest @@ -29,20 +7,3 @@ job-starter: allow_failure: true script: - echo ok - -check-rust-feature-propagation: - stage: check - extends: - - .kubernetes-env - - .common-refs - script: - - zepter run check - -check-toml-format: - stage: check - extends: - - .kubernetes-env - - .common-refs - script: - - taplo format --check --config .config/taplo.toml - - echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues" From c72f9ab594c8c8ea3f51323de8e1846c75f4a428 Mon Sep 17 00:00:00 2001 From: Nick Vikeras Date: Mon, 9 Sep 2024 05:12:56 -0500 Subject: [PATCH 34/66] Plumb RPC listener up to caller (#5038) # Description This PR allows the RPC server's socket address to be returned when initializing the server. This allows the library consumer to easily programmatically determine which port the RPC server is listening on. My use case for this is automated testing. I'd like to be able to simply specify that the server bind to port '0' and then test against whatever port the OS assigns dynamically. I will have many RPC servers running in parallel across many tests within a single process, and I don't want to have to deal with port conflicts. ## Integration Integration is straightforward. My main concern is that I am making non-backwards-compatible changes to public library functions. Let me know if I should leave backwards-compatible wrappers in place for any/all of the public functions that were modified. ## Review Notes The rationale for making the new listen_addresses field on the RpcHandlers struct a ```[MultiAddr]``` rather than ```SocketAddr``` is because I wanted it to be transport-agnostic as well as capable of supporting multiple listening addresses in case that is ever required by the RPC server in the future. # Checklist * [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above. * [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` required) * External contributors: ask maintainers to put the right label on your PR. * [x] I have made corresponding changes to the documentation (if applicable) * [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) 1. I didn't understand what the 'T' label meant. Am I supposed to open a github Issue for my PR? 2. I didn't see an easy way to add tests since the functions I am modifying are not directly called by any tests. --------- Co-authored-by: Niklas Adolfsson --- prdoc/pr_5038.prdoc | 15 +++++++++ substrate/client/rpc-servers/src/lib.rs | 42 ++++++++++++++++++++--- substrate/client/service/src/builder.rs | 16 +++++++-- substrate/client/service/src/lib.rs | 45 +++++++++++++------------ 4 files changed, 89 insertions(+), 29 deletions(-) create mode 100644 prdoc/pr_5038.prdoc diff --git a/prdoc/pr_5038.prdoc b/prdoc/pr_5038.prdoc new file mode 100644 index 000000000000..2bab8ef69f89 --- /dev/null +++ b/prdoc/pr_5038.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Plumb RPC listener up to caller + +doc: + - audience: Node Dev + description: + This PR allows the RPC server's socket address to be returned when initializing the server. + This allows the library consumer to easily programmatically determine which port the RPC server is listening on. 
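Before the affected-crates list below, a minimal sketch of the port-0 testing workflow the description mentions: bind the RPC server to port 0, then recover the OS-assigned port through the new accessor. This is illustrative, not from the patch, and assumes an `RpcHandlers` value named `handlers` from a service whose RPC endpoint was configured with port 0:

```rust
use sc_network::{multiaddr::Protocol, Multiaddr};
use sc_service::RpcHandlers;

/// Extract the TCP port from the first RPC listen address, if any.
fn rpc_port(handlers: &RpcHandlers) -> Option<u16> {
    handlers.listen_addresses().first().and_then(|addr: &Multiaddr| {
        // Scan the multiaddress segments for the TCP protocol component
        // that `start_rpc_servers` pushes after the IP component.
        addr.iter().find_map(|segment| match segment {
            Protocol::Tcp(port) => Some(port),
            _ => None,
        })
    })
}
```

Returning `Multiaddr` rather than `SocketAddr` keeps the accessor transport-agnostic, per the review notes above, at the cost of this small unwrapping step in TCP-only tests.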
+crates: + - name: sc-rpc-server + bump: major + - name: sc-service + bump: major diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index ca74c2371c25..0472a0a2f63c 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -23,11 +23,13 @@ pub mod middleware; pub mod utils; -use std::{error::Error as StdError, time::Duration}; +use std::{error::Error as StdError, net::SocketAddr, time::Duration}; use jsonrpsee::{ core::BoxError, - server::{serve_with_graceful_shutdown, stop_channel, ws, PingConfig, StopHandle}, + server::{ + serve_with_graceful_shutdown, stop_channel, ws, PingConfig, ServerHandle, StopHandle, + }, Methods, RpcModule, }; use middleware::NodeHealthProxyLayer; @@ -46,8 +48,38 @@ pub use utils::{RpcEndpoint, RpcMethods}; const MEGABYTE: u32 = 1024 * 1024; -/// Type alias for the JSON-RPC server. -pub type Server = jsonrpsee::server::ServerHandle; +/// Type to encapsulate the server handle and listening address. +pub struct Server { + /// Handle to the rpc server + handle: ServerHandle, + /// Listening address of the server + listen_addrs: Vec, +} + +impl Server { + /// Creates a new Server. + pub fn new(handle: ServerHandle, listen_addrs: Vec) -> Server { + Server { handle, listen_addrs } + } + + /// Returns the `jsonrpsee::server::ServerHandle` for this Server. Can be used to stop the + /// server. + pub fn handle(&self) -> &ServerHandle { + &self.handle + } + + /// The listen address for the running RPC service. + pub fn listen_addrs(&self) -> &[SocketAddr] { + &self.listen_addrs + } +} + +impl Drop for Server { + fn drop(&mut self) { + // This doesn't not wait for the server to be stopped but fires the signal. + let _ = self.handle.stop(); + } +} /// Trait for providing subscription IDs that can be cloned. pub trait SubscriptionIdProvider: @@ -273,5 +305,5 @@ where // This is to make it work with old scripts/utils that parse the logs. 
log::info!("Running JSON-RPC server: addr={}", format_listen_addrs(&local_addrs)); - Ok(server_handle) + Ok(Server::new(server_handle, local_addrs)) } diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 0dc28d1361cb..28a76847ac06 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -19,7 +19,7 @@ use crate::{ build_network_future, build_system_rpc_future, client::{Client, ClientConfig}, - config::{Configuration, ExecutorConfiguration, KeystoreConfig, PrometheusConfig}, + config::{Configuration, ExecutorConfiguration, KeystoreConfig, Multiaddr, PrometheusConfig}, error::Error, metrics::MetricsService, start_rpc_servers, BuildGenesisBlock, GenesisBlockBuilder, RpcHandlers, SpawnTaskHandle, @@ -43,6 +43,7 @@ use sc_executor::{ use sc_keystore::LocalKeystore; use sc_network::{ config::{FullNetworkConfiguration, SyncMode}, + multiaddr::Protocol, service::{ traits::{PeerStore, RequestResponseConfig}, NotificationMetrics, @@ -527,13 +528,24 @@ where gen_rpc_module, rpc_id_provider, )?; + + let listen_addrs = rpc_server_handle + .listen_addrs() + .into_iter() + .map(|socket_addr| { + let mut multiaddr: Multiaddr = socket_addr.ip().into(); + multiaddr.push(Protocol::Tcp(socket_addr.port())); + multiaddr + }) + .collect(); + let in_memory_rpc = { let mut module = gen_rpc_module()?; module.extensions_mut().insert(DenyUnsafe::No); module }; - let in_memory_rpc_handle = RpcHandlers::new(Arc::new(in_memory_rpc)); + let in_memory_rpc_handle = RpcHandlers::new(Arc::new(in_memory_rpc), listen_addrs); // Spawn informant task spawn_handle.spawn( diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 251eef97be84..babb76f022f0 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -34,6 +34,7 @@ mod client; mod metrics; mod task_manager; +use crate::config::Multiaddr; use std::{ collections::HashMap, net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, @@ -50,6 +51,7 @@ use sc_network::{ }; use sc_network_sync::SyncingService; use sc_network_types::PeerId; +use sc_rpc_server::Server; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; use sp_consensus::SyncOracle; @@ -101,14 +103,22 @@ use tokio::runtime::Handle; const DEFAULT_PROTOCOL_ID: &str = "sup"; -/// RPC handlers that can perform RPC queries. +/// A running RPC service that can perform in-memory RPC queries. #[derive(Clone)] -pub struct RpcHandlers(Arc>); +pub struct RpcHandlers { + // This is legacy and may be removed at some point, it was for WASM stuff before smoldot was a + // thing. https://github.com/paritytech/polkadot-sdk/pull/5038#discussion_r1694971805 + rpc_module: Arc>, + + // This can be used to introspect the port the RPC server is listening on. SDK consumers are + // depending on this and it should be supported even if in-memory query support is removed. + listen_addresses: Vec, +} impl RpcHandlers { /// Create PRC handlers instance. - pub fn new(inner: Arc>) -> Self { - Self(inner) + pub fn new(rpc_module: Arc>, listen_addresses: Vec) -> Self { + Self { rpc_module, listen_addresses } } /// Starts an RPC query. @@ -130,12 +140,17 @@ impl RpcHandlers { // This limit is used to prevent panics and is large enough. 
const TOKIO_MPSC_MAX_SIZE: usize = tokio::sync::Semaphore::MAX_PERMITS; - self.0.raw_json_request(json_query, TOKIO_MPSC_MAX_SIZE).await + self.rpc_module.raw_json_request(json_query, TOKIO_MPSC_MAX_SIZE).await } /// Provides access to the underlying `RpcModule` pub fn handle(&self) -> Arc> { - self.0.clone() + self.rpc_module.clone() + } + + /// Provides access to listen addresses + pub fn listen_addresses(&self) -> &[Multiaddr] { + &self.listen_addresses[..] } } @@ -363,20 +378,6 @@ pub async fn build_system_rpc_future< debug!("`NetworkWorker` has terminated, shutting down the system RPC future."); } -// Wrapper for HTTP and WS servers that makes sure they are properly shut down. -mod waiting { - pub struct Server(pub Option); - - impl Drop for Server { - fn drop(&mut self) { - if let Some(server) = self.0.take() { - // This doesn't not wait for the server to be stopped but fires the signal. - let _ = server.stop(); - } - } - } -} - /// Starts RPC servers. pub fn start_rpc_servers( rpc_configuration: &RpcConfiguration, @@ -384,7 +385,7 @@ pub fn start_rpc_servers( tokio_handle: &Handle, gen_rpc_module: R, rpc_id_provider: Option>, -) -> Result, error::Error> +) -> Result where R: Fn() -> Result, Error>, { @@ -451,7 +452,7 @@ where match tokio::task::block_in_place(|| { tokio_handle.block_on(sc_rpc_server::start_server(server_config)) }) { - Ok(server) => Ok(Box::new(waiting::Server(Some(server)))), + Ok(server) => Ok(server), Err(e) => Err(Error::Application(e)), } } From f4eb41773611008040c9d4d8a8e6b7323eccfca1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 9 Sep 2024 14:07:43 +0200 Subject: [PATCH 35/66] pallet-utility: Improve weight annotations (#5644) Prevent allocations when calculating the weights. --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi --- prdoc/pr_5644.prdoc | 8 +++ substrate/frame/utility/src/lib.rs | 80 ++++++++++++------------------ 2 files changed, 39 insertions(+), 49 deletions(-) create mode 100644 prdoc/pr_5644.prdoc diff --git a/prdoc/pr_5644.prdoc b/prdoc/pr_5644.prdoc new file mode 100644 index 000000000000..3300d557fce4 --- /dev/null +++ b/prdoc/pr_5644.prdoc @@ -0,0 +1,8 @@ +title: 'pallet-utility: Improve weight annotations' +doc: +- audience: Runtime Dev + description: |- + Prevent allocations when calculating the weights. +crates: +- name: pallet-utility + bump: patch diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs index 3ce5b4ff8649..ed5544fe55ca 100644 --- a/substrate/frame/utility/src/lib.rs +++ b/substrate/frame/utility/src/lib.rs @@ -74,7 +74,7 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{dispatch::DispatchClass, pallet_prelude::*}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -183,21 +183,8 @@ pub mod pallet { /// event is deposited. 
#[pallet::call_index(0)] #[pallet::weight({ - let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); - let dispatch_weight = dispatch_infos.iter() - .map(|di| di.weight) - .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) - .saturating_add(T::WeightInfo::batch(calls.len() as u32)); - let dispatch_class = { - let all_operational = dispatch_infos.iter() - .map(|di| di.class) - .all(|class| class == DispatchClass::Operational); - if all_operational { - DispatchClass::Operational - } else { - DispatchClass::Normal - } - }; + let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(&calls); + let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch(calls.len() as u32)); (dispatch_weight, dispatch_class) })] pub fn batch( @@ -233,13 +220,13 @@ pub mod pallet { // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. - return Ok(Some(base_weight + weight).into()) + return Ok(Some(base_weight.saturating_add(weight)).into()) } Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch(calls_len as u32); - Ok(Some(base_weight + weight).into()) + Ok(Some(base_weight.saturating_add(weight)).into()) } /// Send a call through an indexed pseudonym of the sender. @@ -305,21 +292,8 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(2)] #[pallet::weight({ - let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); - let dispatch_weight = dispatch_infos.iter() - .map(|di| di.weight) - .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) - .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); - let dispatch_class = { - let all_operational = dispatch_infos.iter() - .map(|di| di.class) - .all(|class| class == DispatchClass::Operational); - if all_operational { - DispatchClass::Operational - } else { - DispatchClass::Normal - } - }; + let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(&calls); + let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); (dispatch_weight, dispatch_class) })] pub fn batch_all( @@ -359,7 +333,7 @@ pub mod pallet { // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. - err.post_info = Some(base_weight + weight).into(); + err.post_info = Some(base_weight.saturating_add(weight)).into(); err })?; Self::deposit_event(Event::ItemCompleted); @@ -414,21 +388,8 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. 
#[pallet::call_index(4)] #[pallet::weight({ - let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); - let dispatch_weight = dispatch_infos.iter() - .map(|di| di.weight) - .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) - .saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); - let dispatch_class = { - let all_operational = dispatch_infos.iter() - .map(|di| di.class) - .all(|class| class == DispatchClass::Operational); - if all_operational { - DispatchClass::Operational - } else { - DispatchClass::Normal - } - }; + let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(&calls); + let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); (dispatch_weight, dispatch_class) })] pub fn force_batch( @@ -494,6 +455,27 @@ pub mod pallet { res.map(|_| ()).map_err(|e| e.error) } } + + impl Pallet { + /// Get the accumulated `weight` and the dispatch class for the given `calls`. + fn weight_and_dispatch_class( + calls: &[::RuntimeCall], + ) -> (Weight, DispatchClass) { + let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()); + let (dispatch_weight, dispatch_class) = dispatch_infos.fold( + (Weight::zero(), DispatchClass::Operational), + |(total_weight, dispatch_class): (Weight, DispatchClass), di| { + ( + total_weight.saturating_add(di.weight), + // If not all are `Operational`, we want to use `DispatchClass::Normal`. + if di.class == DispatchClass::Normal { di.class } else { dispatch_class }, + ) + }, + ); + + (dispatch_weight, dispatch_class) + } + } } /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. From 030cb4a71b0b390626a586bfe7117b7c66b4700c Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 9 Sep 2024 16:27:22 +0300 Subject: [PATCH 36/66] Allow to disable gap creation during block import (#5343) This feature is helpful for us with custom sync protocol that is similar to Warp sync except we do not ever sync the gap and don't want it to exist in the first place (see https://github.com/paritytech/polkadot-sdk/issues/5333 and its references for motivation). Otherwise we had to resort to this: https://github.com/autonomys/polkadot-sdk/commit/d5375125ca7c59fcc1ac72dc1b2ac251cbc80323 --------- Co-authored-by: Davide Galassi --- prdoc/pr_5343.prdoc | 19 +++++++++++++++++++ substrate/client/api/src/backend.rs | 3 +++ substrate/client/api/src/in_mem.rs | 2 ++ .../consensus/common/src/block_import.rs | 3 +++ substrate/client/db/src/lib.rs | 11 +++++++++-- substrate/client/service/src/client/client.rs | 3 +++ 6 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_5343.prdoc diff --git a/prdoc/pr_5343.prdoc b/prdoc/pr_5343.prdoc new file mode 100644 index 000000000000..3cec70de93cb --- /dev/null +++ b/prdoc/pr_5343.prdoc @@ -0,0 +1,19 @@ +title: Allow to disable gap creation during block import + +doc: + - audience: Node Dev + description: | + New property `BlockImportParams::create_gap` allows to change whether to create block gap in case block + has no parent (defaults to `true` keeping existing behavior), which is helpful for sync protocols that do not need + to sync the gap after this happens. `BlockImportOperation::create_gap()` method was also introduced, though in + most cases `BlockImportParams::create_gap` will be used. 
+ +crates: + - name: sc-client-api + bump: major + - name: sc-consensus + bump: minor + - name: sc-client-db + bump: minor + - name: sc-service + bump: minor diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index 0b2a34952401..9c9601a912ac 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -232,6 +232,9 @@ pub trait BlockImportOperation { /// Add a transaction index operation. fn update_transaction_index(&mut self, index: Vec) -> sp_blockchain::Result<()>; + + /// Configure whether to create a block gap if newly imported block is missing parent + fn set_create_gap(&mut self, create_gap: bool); } /// Interface for performing operations on the backend. diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index ba89aede9147..c045a393bb21 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ -584,6 +584,8 @@ impl backend::BlockImportOperation for BlockImportOperatio ) -> sp_blockchain::Result<()> { Ok(()) } + + fn set_create_gap(&mut self, _create_gap: bool) {} } /// In-memory backend. Keeps all states and blocks in memory. diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index 4d7b89f37d86..0fcf96a96368 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -214,6 +214,8 @@ pub struct BlockImportParams { pub fork_choice: Option, /// Re-validate existing block. pub import_existing: bool, + /// Whether to create "block gap" in case this block doesn't have parent. + pub create_gap: bool, /// Cached full header hash (with post-digests applied). pub post_hash: Option, } @@ -234,6 +236,7 @@ impl BlockImportParams { auxiliary: Vec::new(), fork_choice: None, import_existing: false, + create_gap: true, post_hash: None, } } diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 4559a01e57e3..72707c306f58 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -834,6 +834,7 @@ pub struct BlockImportOperation { finalized_blocks: Vec<(Block::Hash, Option)>, set_head: Option, commit_state: bool, + create_gap: bool, index_ops: Vec, } @@ -988,6 +989,10 @@ impl sc_client_api::backend::BlockImportOperation self.index_ops = index_ops; Ok(()) } + + fn set_create_gap(&mut self, create_gap: bool) { + self.create_gap = create_gap; + } } struct StorageDb { @@ -1709,8 +1714,9 @@ impl Backend { unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406") }, } - } else if number > best_num + One::one() && - number > One::one() && self.blockchain.header(parent_hash)?.is_none() + } else if operation.create_gap && + number > best_num + One::one() && + self.blockchain.header(parent_hash)?.is_none() { let gap = BlockGap { start: best_num + One::one(), @@ -2072,6 +2078,7 @@ impl sc_client_api::backend::Backend for Backend { finalized_blocks: Vec::new(), set_head: None, commit_state: false, + create_gap: true, index_ops: Default::default(), }) } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 8b699c7faffd..ce5b92551bf2 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -513,6 +513,7 @@ where fork_choice, intermediates, import_existing, + create_gap, .. 
} = import_block; @@ -537,6 +538,8 @@ where *self.importing_block.write() = Some(hash); + operation.op.set_create_gap(create_gap); + let result = self.execute_and_import_block( operation, origin, From 2d4e89f091c103813e462bd9cb188473b2673708 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 9 Sep 2024 17:12:51 +0200 Subject: [PATCH 37/66] [Bot] Revive prdoc bot (#5648) Prdoc bot was deleted in https://github.com/paritytech/polkadot-sdk/pull/5457 just after being added in https://github.com/paritytech/polkadot-sdk/pull/5331 without replacement. Now bringing it back until it is integrated into the new command structure. Formatting is now also fixed, such that the title is always first and the description renders correctly. --------- Signed-off-by: Oliver Tale-Yazdi --- .github/scripts/generate-prdoc.py | 26 +++++++-- .github/workflows/command-prdoc.yml | 90 +++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/command-prdoc.yml diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py index ba7def20fcb9..a6a97008dca6 100644 --- a/.github/scripts/generate-prdoc.py +++ b/.github/scripts/generate-prdoc.py @@ -48,9 +48,8 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): else: print(f"No preexisting PrDoc for PR {pr}") - prdoc = { "doc": [{}], "crates": [] } + prdoc = { "title": title, "doc": [{}], "crates": [] } - prdoc["title"] = title prdoc["doc"][0]["audience"] = audience prdoc["doc"][0]["description"] = description @@ -58,13 +57,19 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): modified_paths = [] for diff in whatthepatch.parse_patch(patch): - modified_paths.append(diff.header.new_path) + new_path = diff.header.new_path + # Sometimes this lib returns `/dev/null` as the new path... + if not new_path.startswith("/dev"): + modified_paths.append(new_path) modified_crates = {} for p in modified_paths: # Go up until we find a Cargo.toml p = os.path.join(workspace.path, p) while not os.path.exists(os.path.join(p, "Cargo.toml")): + print(f"Could not find Cargo.toml in {p}") + if p == '/': + exit(1) p = os.path.dirname(p) with open(os.path.join(p, "Cargo.toml")) as f: @@ -95,9 +100,19 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): # write the parsed PR documentation back to the file with open(path, "w") as f: - yaml.dump(prdoc, f) + yaml.dump(prdoc, f, sort_keys=False) print(f"PrDoc for PR {pr} written to {path}") +# Make the `description` a multiline string instead of escaping \r\n. 
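# Illustration of the effect (added commentary, not patch lines): once the
# presenter below is registered,
#     yaml.dump({"description": "line one\nline two"})
# renders the multi-line value as a block scalar,
#     description: |
#       line one
#       line two
# instead of a quoted scalar with the newline encoded inline.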
+def setup_yaml(): + def yaml_multiline_string_presenter(dumper, data): + if len(data.splitlines()) > 1: + data = '\n'.join([line.rstrip() for line in data.strip().splitlines()]) + return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') + return dumper.represent_scalar('tag:yaml.org,2002:str', data) + + yaml.add_representer(str, yaml_multiline_string_presenter) + def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--pr", type=int, required=True) @@ -108,6 +123,7 @@ def parse_args(): if __name__ == "__main__": args = parse_args() - force = True if args.force.lower() == "true" else False + force = True if (args.force or "false").lower() == "true" else False print(f"Args: {args}, force: {force}") + setup_yaml() from_pr_number(args.pr, args.audience, args.bump, force) diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml new file mode 100644 index 000000000000..3a08b9a5fb28 --- /dev/null +++ b/.github/workflows/command-prdoc.yml @@ -0,0 +1,90 @@ +name: Command PrDoc + +on: + workflow_dispatch: + inputs: + pr: + type: number + description: Number of the Pull Request + required: true + bump: + type: choice + description: Default bump level for all crates + default: "TODO" + required: true + options: + - "TODO" + - "no change" + - "patch" + - "minor" + - "major" + audience: + type: choice + description: Audience of the PrDoc + default: "TODO" + required: true + options: + - "TODO" + - "Runtime Dev" + - "Runtime User" + - "Node Dev" + - "Node User" + overwrite: + type: choice + description: Overwrite existing PrDoc + default: "true" + required: true + options: + - "true" + - "false" + +concurrency: + group: command-prdoc + cancel-in-progress: true + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-prdoc: + needs: [set-image] + runs-on: ubuntu-latest + timeout-minutes: 20 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Generate PrDoc + run: | + python3 -m pip install -q cargo-workspace PyGithub whatthepatch pyyaml toml + + python3 .github/scripts/generate-prdoc.py --pr "${{ inputs.pr }}" --bump "${{ inputs.bump }}" --audience "${{ inputs.audience }}" --force "${{ inputs.overwrite }}" + + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - name: Push Commit + uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: Add PrDoc (auto generated) + branch: ${{ steps.gh.outputs.branch }} + file_pattern: 'prdoc/*.prdoc' From 100c779e0d1be938dfe9ad1bdb4a80ac8b743dc7 Mon Sep 17 00:00:00 2001 From: ron Date: Mon, 9 Sep 2024 23:50:31 +0800 Subject: [PATCH 38/66] Revert "Revert "Bump package as minor"" This reverts commit 9761118c357a15666743a84507fea168d9580961. --- prdoc/pr_5546.prdoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/prdoc/pr_5546.prdoc b/prdoc/pr_5546.prdoc index 16e2bd993463..44c8ee177a6f 100644 --- a/prdoc/pr_5546.prdoc +++ b/prdoc/pr_5546.prdoc @@ -13,11 +13,11 @@ crates: - name: snowbridge-pallet-outbound-queue bump: patch - name: snowbridge-pallet-system - bump: major + bump: minor - name: snowbridge-core - bump: major + bump: minor - name: snowbridge-router-primitives - bump: major + bump: minor - name: bridge-hub-westend-runtime bump: patch - name: bridge-hub-rococo-runtime From de6d3ee61759711c10f4a88d4c8eaffe78edf1d7 Mon Sep 17 00:00:00 2001 From: ron Date: Mon, 9 Sep 2024 23:52:52 +0800 Subject: [PATCH 39/66] Ignore validate check --- prdoc/pr_5546.prdoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/prdoc/pr_5546.prdoc b/prdoc/pr_5546.prdoc index 44c8ee177a6f..95f02dbe13b2 100644 --- a/prdoc/pr_5546.prdoc +++ b/prdoc/pr_5546.prdoc @@ -14,10 +14,13 @@ crates: bump: patch - name: snowbridge-pallet-system bump: minor + validate: false - name: snowbridge-core bump: minor + validate: false - name: snowbridge-router-primitives bump: minor + validate: false - name: bridge-hub-westend-runtime bump: patch - name: bridge-hub-rococo-runtime From 116c421d1d56dc43923d6af01b5d5f7b38678099 Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 10 Sep 2024 00:00:42 +0800 Subject: [PATCH 40/66] Rename as EthereumUniversalLocation be more clear --- .../primitives/router/src/inbound/mod.rs | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index 4aa721728449..49bb731e5e77 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -102,14 +102,14 @@ pub struct MessageToXcm< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > where CreateAssetCall: Get, CreateAssetDeposit: Get, Balance: BalanceT, ConvertAssetId: MaybeEquivalence, - UniversalLocation: Get, + EthereumUniversalLocation: Get, GlobalAssetHubLocation: Get, { _phantom: PhantomData<( @@ -119,7 +119,7 @@ pub struct MessageToXcm< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, )>, } @@ -156,7 +156,7 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > ConvertMessage for MessageToXcm< @@ -166,16 +166,17 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, - > where + > +where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, Balance: BalanceT + From, AccountId: Into<[u8; 32]>, ConvertAssetId: MaybeEquivalence, - 
UniversalLocation: Get, + EthereumUniversalLocation: Get, GlobalAssetHubLocation: Get, { type Balance = Balance; @@ -214,7 +215,7 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, > MessageToXcm< @@ -224,16 +225,17 @@ impl< AccountId, Balance, ConvertAssetId, - UniversalLocation, + EthereumUniversalLocation, GlobalAssetHubLocation, - > where + > +where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, Balance: BalanceT + From, AccountId: Into<[u8; 32]>, ConvertAssetId: MaybeEquivalence, - UniversalLocation: Get, + EthereumUniversalLocation: Get, GlobalAssetHubLocation: Get, { fn convert_register_token( @@ -416,7 +418,7 @@ impl< let mut reanchored_asset_loc = asset_loc.clone(); reanchored_asset_loc - .reanchor(&GlobalAssetHubLocation::get(), &UniversalLocation::get()) + .reanchor(&GlobalAssetHubLocation::get(), &EthereumUniversalLocation::get()) .map_err(|_| ConvertMessageError::CannotReanchor)?; let asset: Asset = (reanchored_asset_loc, amount).into(); @@ -479,7 +481,8 @@ impl< // Forward message id to Asset Hub. instructions.push(SetTopic(message_id.into())); - // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also teleport fees) + // `total_fees` to burn on this chain when sending `instructions` to run on AH (which also + // teleport fees) Ok((instructions.into(), total_fees.into())) } } From 5203dc02b59d4743bca9633b3b6bcc7e37b7e883 Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 10 Sep 2024 00:40:00 +0800 Subject: [PATCH 41/66] Switch to using half the fee for local exec and half for transport --- .../snowbridge/primitives/router/src/inbound/mod.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index 49bb731e5e77..6c107cfa07c2 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -168,8 +168,7 @@ impl< ConvertAssetId, EthereumUniversalLocation, GlobalAssetHubLocation, - > -where + > where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, @@ -227,8 +226,7 @@ impl< ConvertAssetId, EthereumUniversalLocation, GlobalAssetHubLocation, - > -where + > where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, @@ -396,7 +394,7 @@ where asset_hub_fee: u128, ) -> Result<(Xcm<()>, Balance), ConvertMessageError> { let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); + let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee / 2).into(); let (dest_para_id, beneficiary, dest_para_fee) = match destination { // Final destination is a 32-byte account on AssetHub @@ -440,10 +438,6 @@ where let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); instructions.extend(vec![ - // `SetFeesMode` to pay transport fee from bridge sovereign, which depends on - // unspent AH fees deposited to the bridge sovereign, - // more context and analysis in https://github.com/paritytech/polkadot-sdk/pull/5546#discussion_r1744682864 - SetFeesMode { jit_withdraw: true }, // `SetAppendix` ensures that `fees` are not trapped in any case SetAppendix(Xcm(vec![DepositAsset { assets: AllCounted(2).into(), From d2e962fc1ec5c33b33ca39029d9a525404048491 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 9 Sep 2024 19:21:51 
+0200 Subject: [PATCH 42/66] Install prdoc from Parity fork (#5625) Prdoc is now published as a Parity fork under the [`parity-prdoc`](https://crates.io/crates/parity-prdoc) crate after the directions diverged from the ideas of the original creator (discussions [here](https://github.com/paritytech/prdoc/pull/40) and [here](https://github.com/paritytech/prdoc/issues/36)). Now updating the install instructions here. --------- Signed-off-by: Oliver Tale-Yazdi --- .github/workflows/check-prdoc.yml | 2 +- docs/contributor/prdoc.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 69311c41dd6f..6c8f1ed7a300 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -6,7 +6,7 @@ on: merge_group: env: - IMAGE: docker.io/paritytech/prdoc:v0.0.8 + IMAGE: docker.io/paritytech/prdoc:v0.1.1 API_BASE: https://api.github.com/repos REPO: ${{ github.repository }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md index 0c8165af40f4..3dddcc10f5d0 100644 --- a/docs/contributor/prdoc.md +++ b/docs/contributor/prdoc.md @@ -14,7 +14,7 @@ the [CODEOWNERS](../../.github/CODEOWNERS) for advice. A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one: -1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install prdoc`. +1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install parity-prdoc`. 1. Open a Pull Request and get the PR number. 1. Generate the file with `prdoc generate `. The output filename will be printed. 1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example From f3f377f5ec7009036a33a822fdbb8439ca696ca7 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Mon, 9 Sep 2024 22:25:42 +0200 Subject: [PATCH 43/66] [pallet-revive] move event topics in event's body (#5640) Fix https://github.com/paritytech/polkadot-sdk/issues/5629 --- prdoc/pr_5640.prdoc | 10 ++++++++++ substrate/frame/revive/src/benchmarking/mod.rs | 6 +----- substrate/frame/revive/src/exec.rs | 10 ++++------ substrate/frame/revive/src/lib.rs | 11 +++-------- substrate/frame/revive/src/tests.rs | 5 +++-- 5 files changed, 21 insertions(+), 21 deletions(-) create mode 100644 prdoc/pr_5640.prdoc diff --git a/prdoc/pr_5640.prdoc b/prdoc/pr_5640.prdoc new file mode 100644 index 000000000000..fdd7f5e1b893 --- /dev/null +++ b/prdoc/pr_5640.prdoc @@ -0,0 +1,10 @@ +title: "[pallet-revive] Move event's topics" + +doc: + - audience: Runtime Dev + description: | + Move event's topics inside body + +crates: + - name: pallet-revive + bump: major diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index 8cdd7da5db9d..8601f5f53542 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -772,11 +772,7 @@ mod benchmarks { assert_eq!( record.event, - crate::Event::ContractEmitted { contract: instance.address(), data }.into(), - ); - assert_eq!( - record.topics.iter().map(|t| H256::from_slice(t.as_ref())).collect::>(), - topics, + crate::Event::ContractEmitted { contract: instance.address(), data, topics }.into(), ); } diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 016bdec37afd..468f5aa8240e 100644 --- a/substrate/frame/revive/src/exec.rs +++ 
b/substrate/frame/revive/src/exec.rs @@ -1487,13 +1487,11 @@ where } fn deposit_event(&mut self, topics: Vec, data: Vec) { - Contracts::::deposit_indexed_event( + Contracts::::deposit_event(Event::ContractEmitted { + contract: T::AddressMapper::to_address(self.account_id()), + data, topics, - Event::ContractEmitted { - contract: T::AddressMapper::to_address(self.account_id()), - data, - }, - ); + }); } fn block_number(&self) -> U256 { diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index 4c6e5cd26a11..d1e17fb7b390 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -406,6 +406,9 @@ pub mod pallet { /// Data supplied by the contract. Metadata generated during contract compilation /// is needed to decode it. data: Vec, + /// A list of topics used to index the event. + /// Number of topics is capped by [`limits::NUM_EVENT_TOPICS`]. + topics: Vec, }, /// A code with the specified hash was removed. @@ -1186,14 +1189,6 @@ where fn deposit_event(event: Event) { >::deposit_event(::RuntimeEvent::from(event)) } - - /// Deposit a pallet contracts indexed event. - fn deposit_indexed_event(topics: Vec, event: Event) { - >::deposit_event_indexed( - &topics.into_iter().map(Into::into).collect::>(), - ::RuntimeEvent::from(event).into(), - ) - } } // Set up a global reference to the boolean flag used for the re-entrancy guard. diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index f2944c7932a6..73914c9aae07 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -678,9 +678,10 @@ mod run_tests { phase: Phase::Initialization, event: RuntimeEvent::Contracts(crate::Event::ContractEmitted { contract: addr, - data: vec![1, 2, 3, 4] + data: vec![1, 2, 3, 4], + topics: vec![H256::repeat_byte(42)], }), - topics: vec![H256::repeat_byte(42)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, From aec2b10539251fc20450f8efa453f21dee6b95a1 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Tue, 10 Sep 2024 07:31:11 +0900 Subject: [PATCH 44/66] frame pallet macro: fix span for error on wrong returned type. (#5580) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Explicitly give the types in some generated code so that the error shows up good when user code is wrong. --------- Co-authored-by: Bastian Köcher --- prdoc/pr_5580.prdoc | 13 +++++++ .../procedural/src/pallet/expand/call.rs | 36 +++++++++++++----- .../procedural/src/pallet/parse/call.rs | 11 ++---- .../procedural/src/pallet/parse/helper.rs | 23 +++++++++--- .../tests/pallet_ui/call_span_for_error.rs | 37 +++++++++++++++++++ .../pallet_ui/call_span_for_error.stderr | 26 +++++++++++++ 6 files changed, 125 insertions(+), 21 deletions(-) create mode 100644 prdoc/pr_5580.prdoc create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_span_for_error.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_span_for_error.stderr diff --git a/prdoc/pr_5580.prdoc b/prdoc/pr_5580.prdoc new file mode 100644 index 000000000000..e03b946070aa --- /dev/null +++ b/prdoc/pr_5580.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix error message on pallet macro + +doc: + - audience: Runtime Dev + description: | + Improve error message for pallet macro generated code. 
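Concretely, a simplified sketch of the wrapper the macro now emits (paths shortened, `body()` standing in for the user-written method body; not the literal expansion): because the `Ok`/`Err` types are pinned to the dispatchable's declared return type, a wrong `Err` value fails to unify at the user's own `return` expression instead of deep inside the expansion:

```rust
// For a call declared `-> DispatchResult`:
let res: DispatchResult =
    frame_support::storage::with_storage_layer::<(), DispatchError, _>(|| body());

// For a call declared `-> DispatchResultWithPostInfo`:
let res: DispatchResultWithPostInfo = frame_support::storage::with_storage_layer::<
    PostDispatchInfo,
    DispatchErrorWithPostInfo,
    _,
>(|| body());
```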
+ +crates: + - name: frame-support-procedural + bump: patch diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index f395872c8a80..5dc8dc3146cf 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -18,7 +18,7 @@ use crate::{ pallet::{ expand::warnings::{weight_constant_warning, weight_witness_warning}, - parse::call::CallWeightDef, + parse::{call::CallWeightDef, helper::CallReturnType}, Def, }, COUNTER, @@ -197,18 +197,36 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" }; // Wrap all calls inside of storage layers - if let Some(syn::Item::Impl(item_impl)) = def - .call - .as_ref() - .map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index]) - { - item_impl.items.iter_mut().for_each(|i| { - if let syn::ImplItem::Fn(method) = i { + if let Some(call) = def.call.as_ref() { + let item_impl = + &mut def.item.content.as_mut().expect("Checked by def parser").1[call.index]; + let syn::Item::Impl(item_impl) = item_impl else { + unreachable!("Checked by def parser"); + }; + + item_impl.items.iter_mut().enumerate().for_each(|(i, item)| { + if let syn::ImplItem::Fn(method) = item { + let return_type = + &call.methods.get(i).expect("def should be consistent with item").return_type; + + let (ok_type, err_type) = match return_type { + CallReturnType::DispatchResult => ( + quote::quote!(()), + quote::quote!(#frame_support::pallet_prelude::DispatchError), + ), + CallReturnType::DispatchResultWithPostInfo => ( + quote::quote!(#frame_support::dispatch::PostDispatchInfo), + quote::quote!(#frame_support::dispatch::DispatchErrorWithPostInfo), + ), + }; + let block = &method.block; method.block = syn::parse_quote! {{ // We execute all dispatchable in a new storage layer, allowing them // to return an error at any point, and undoing any storage changes. - #frame_support::storage::with_storage_layer(|| #block) + #frame_support::storage::with_storage_layer::<#ok_type, #err_type, _>( + || #block + ) }}; } }); diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 4e09b86fddec..68c2cb8bd1b3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -89,6 +89,8 @@ pub struct CallVariantDef { pub cfg_attrs: Vec, /// The optional `feeless_if` attribute on the `pallet::call`. pub feeless_check: Option, + /// The return type of the call: `DispatchInfo` or `DispatchResultWithPostInfo`. + pub return_type: helper::CallReturnType, } /// Attributes for functions in call impl block. 
@@ -260,13 +262,7 @@ impl CallDef { }, } - if let syn::ReturnType::Type(_, type_) = &method.sig.output { - helper::check_pallet_call_return_type(type_)?; - } else { - let msg = "Invalid pallet::call, require return type \ - DispatchResultWithPostInfo"; - return Err(syn::Error::new(method.sig.span(), msg)) - } + let return_type = helper::check_pallet_call_return_type(&method.sig)?; let cfg_attrs: Vec = helper::get_item_cfg_attrs(&method.attrs); let mut call_idx_attrs = vec![]; @@ -447,6 +443,7 @@ impl CallDef { attrs: method.attrs.clone(), cfg_attrs, feeless_check, + return_type, }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs index d4f58a4c56df..d5ae607d90f9 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs @@ -597,25 +597,38 @@ pub fn check_type_value_gen( Ok(i) } +/// The possible return type of a dispatchable. +#[derive(Clone)] +pub enum CallReturnType { + DispatchResult, + DispatchResultWithPostInfo, +} + /// Check the keyword `DispatchResultWithPostInfo` or `DispatchResult`. -pub fn check_pallet_call_return_type(type_: &syn::Type) -> syn::Result<()> { - pub struct Checker; +pub fn check_pallet_call_return_type(sig: &syn::Signature) -> syn::Result { + let syn::ReturnType::Type(_, type_) = &sig.output else { + let msg = "Invalid pallet::call, require return type \ + DispatchResultWithPostInfo"; + return Err(syn::Error::new(sig.span(), msg)) + }; + + pub struct Checker(CallReturnType); impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { let lookahead = input.lookahead1(); if lookahead.peek(keyword::DispatchResultWithPostInfo) { input.parse::()?; - Ok(Self) + Ok(Self(CallReturnType::DispatchResultWithPostInfo)) } else if lookahead.peek(keyword::DispatchResult) { input.parse::()?; - Ok(Self) + Ok(Self(CallReturnType::DispatchResult)) } else { Err(lookahead.error()) } } } - syn::parse2::(type_.to_token_stream()).map(|_| ()) + syn::parse2::(type_.to_token_stream()).map(|c| c.0) } pub(crate) fn two128_str(s: &str) -> TokenStream { diff --git a/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.rs b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.rs new file mode 100644 index 000000000000..08b42c29a68b --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
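// Added commentary (not part of the new test file itself): `foo` below is
// declared `-> DispatchResultWithPostInfo` but returns
// `Err(DispatchError::BadOrigin)`. With the fully annotated
// `with_storage_layer` wrapper the macro now emits, rustc reports a plain
// E0308 mismatched-types error (expected `DispatchErrorWithPostInfo`, found
// `DispatchError`) at the `return` expression itself, as the .stderr
// expectation further below shows.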
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[pallet::call] + impl<T: Config> Pallet<T> { + pub fn foo(origin: OriginFor<T>) -> DispatchResultWithPostInfo { + return Err(DispatchError::BadOrigin); + } + } +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.stderr b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.stderr new file mode 100644 index 000000000000..8f3003c02227 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_span_for_error.stderr @@ -0,0 +1,26 @@ +error[E0308]: mismatched types + --> tests/pallet_ui/call_span_for_error.rs:32:15 + | +32 | return Err(DispatchError::BadOrigin); + | --- ^^^^^^^^^^^^^^^^^^^^^^^^ expected `DispatchErrorWithPostInfo`, found `DispatchError` + | | + | arguments to this enum variant are incorrect + | + = note: expected struct `DispatchErrorWithPostInfo` + found enum `frame_support::pallet_prelude::DispatchError` +help: the type constructed contains `frame_support::pallet_prelude::DispatchError` due to the type of the argument passed + --> tests/pallet_ui/call_span_for_error.rs:32:11 + | +32 | return Err(DispatchError::BadOrigin); + | ^^^^------------------------^ + | | + | this argument influences the type of `Err` +note: tuple variant defined here + --> $RUST/core/src/result.rs + | + | Err(#[stable(feature = "rust1", since = "1.0.0")] E), + | ^^^ +help: call `Into::into` on this expression to convert `frame_support::pallet_prelude::DispatchError` into `DispatchErrorWithPostInfo` + | +32 | return Err(DispatchError::BadOrigin.into()); + | +++++++ From 9930d2137fcbc55e32476627ec2e30071b393533 Mon Sep 17 00:00:00 2001 From: Vedhavyas Singareddi Date: Tue, 10 Sep 2024 11:35:41 +0530 Subject: [PATCH 45/66] Update `RuntimeVersion` type and use `system_version` to derive extrinsics root `StateVersion` instead of `V0` (#4257) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR - Renames `RuntimeVersion::state_version` to `system_version` - Uses `Runtime::system_version` to derive extrinsics root `StateVersion` instead of default `StateVersion::V0` This PR should not be breaking any existing chains so long as they use the same `RuntimeVersion::state_version` for `Runtime::system_version` Using `RuntimeVersion::system_version = 2` will make the extrinsics root use `StateVersion::V1` instead of `V0` RFC for this change - https://github.com/polkadot-fellows/RFCs/pull/42 --------- Co-authored-by: Bastian Köcher Co-authored-by: Koute Co-authored-by: Nazar Mokrynskyi --- Cargo.lock | 1 + cumulus/client/network/src/tests.rs | 2 +- cumulus/client/pov-recovery/src/tests.rs | 2 +- cumulus/pallets/parachain-system/src/mock.rs | 2 +- .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../collectives-westend/src/lib.rs | 2 +- .../contracts/contracts-rococo/src/lib.rs | 2 +- .../coretime/coretime-rococo/src/lib.rs | 2 +- .../coretime/coretime-westend/src/lib.rs | 2 +- .../glutton/glutton-westend/src/lib.rs | 2 +- .../runtimes/people/people-rococo/src/lib.rs | 2 +- .../runtimes/people/people-westend/src/lib.rs | 2 +- .../runtimes/starters/seedling/src/lib.rs | 2 +- .../runtimes/starters/shell/src/lib.rs | 2 +-
.../runtimes/testing/penpal/src/lib.rs | 2 +- .../testing/rococo-parachain/src/lib.rs | 2 +- cumulus/test/runtime/src/lib.rs | 4 +- .../chain_spec_runtime/src/runtime.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/pr_4257.prdoc | 76 +++ substrate/bin/node/runtime/src/lib.rs | 2 +- substrate/client/block-builder/src/lib.rs | 2 +- substrate/client/executor/src/wasm_runtime.rs | 12 +- .../rpc-spec-v2/src/chain_head/tests.rs | 2 +- substrate/client/rpc/src/state/tests.rs | 3 +- .../support/test/compile_pass/src/lib.rs | 2 +- substrate/frame/system/src/lib.rs | 20 +- substrate/frame/system/src/mock.rs | 2 +- substrate/frame/system/src/tests.rs | 5 +- substrate/primitives/api/src/lib.rs | 2 +- substrate/primitives/storage/src/lib.rs | 1 + .../primitives/version/proc-macro/Cargo.toml | 1 + .../proc-macro/src/decl_runtime_version.rs | 44 +- substrate/primitives/version/src/lib.rs | 432 +++++++++++++++++- substrate/test-utils/runtime/src/lib.rs | 2 +- templates/minimal/runtime/src/lib.rs | 2 +- templates/parachain/runtime/src/lib.rs | 2 +- templates/solochain/runtime/src/lib.rs | 2 +- 43 files changed, 584 insertions(+), 79 deletions(-) create mode 100644 prdoc/pr_4257.prdoc diff --git a/Cargo.lock b/Cargo.lock index e1b0b3f8f9e9..66fd0d05cc34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21070,6 +21070,7 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", + "proc-macro-warning 1.0.0", "proc-macro2 1.0.86", "quote 1.0.37", "sp-version", diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index cde73c4c5180..81c2d9f24f28 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -323,7 +323,7 @@ impl RelayChainInterface for DummyRelayChainInterface { impl_version: 0, apis: Cow::Owned(apis), transaction_version: 5, - state_version: 1, + system_version: 1, }) } } diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 6f274ed18b6b..f300bdc5f2ba 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -329,7 +329,7 @@ impl RelayChainInterface for Relaychain { impl_version: 0, apis: Cow::Owned(apis), transaction_version: 5, - state_version: 1, + system_version: 1, }) } diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index b4d118aadf04..247de3a29b69 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -64,7 +64,7 @@ parameter_types! { impl_version: 1, apis: sp_version::create_apis_vec!([]), transaction_version: 1, - state_version: 1, + system_version: 1, }; pub const ParachainId: ParaId = ParaId::new(200); pub const ReservedXcmpWeight: Weight = Weight::zero(); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 2f3fb6b68c4a..a4a2554b7afc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -128,7 +128,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
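The runtime diffs that follow all repeat one mechanical change; a hedged sketch of a post-rename declaration (field values are placeholders):

pub const VERSION: sp_version::RuntimeVersion = sp_version::RuntimeVersion {
	spec_name: sp_runtime::create_runtime_str!("example"),
	impl_name: sp_runtime::create_runtime_str!("example"),
	authoring_version: 1,
	spec_version: 1,
	impl_version: 1,
	apis: sp_version::create_apis_vec!([]),
	transaction_version: 1,
	// Renamed from `state_version`; 0 and 1 keep their previous meaning,
	// while 2 additionally derives the extrinsics root with `StateVersion::V1`.
	system_version: 1,
};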
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 97dbe7c361c1..6da2a0bc7b95 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 14409ce4642d..6c6e2ec7efdd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -238,7 +238,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 5717db456a77..ddd40dbf60e0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -213,7 +213,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index dea2eb03db3a..f22feb70382a 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -129,7 +129,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index bf173fb618af..55770515d73f 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -148,7 +148,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 25324bf17764..aea2bf232cbc 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -150,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index a3051e4bf271..218afaab924d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -149,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 942e11e0b257..abf13a596a7d 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -106,7 +106,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 77bfb99669c6..cb9177d0c23b 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -138,7 +138,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 3343d2be749d..9813c5cb6acc 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -138,7 +138,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 1fe72604d373..f126ee861fa7 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -81,7 +81,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 0, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 1dfbe2b6c41c..fac2d1312c0f 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -89,7 +89,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 0, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 7d19c0ed8d85..266894c3e4ed 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -249,7 +249,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// This determines the average expected block time that we are targeting. diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index dff7046f1972..34646f84aedb 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, - state_version: 0, + system_version: 0, }; pub const MILLISECS_PER_BLOCK: u64 = 6000; diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 274f16ab630d..ba0a3487011a 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; #[cfg(feature = "increment-spec-version")] @@ -146,7 +146,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES; diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs index 195d1b124474..5be3a59dc7bb 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs @@ -46,7 +46,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The signed extensions that are added to the runtime. diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 6b046e190830..6ec49c5830f7 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -174,7 +174,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, - state_version: 1, + system_version: 1, }; /// The BABE epoch configuration at genesis. 
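Beyond the field rename, the `sc-block-builder` and `frame-system` diffs further below stop hard-coding `StateVersion::V0` and instead thread the runtime-derived version into the ordered trie root. A minimal sketch of that call shape, assuming `BlakeTwo256` as the hasher:

use sp_core::storage::StateVersion;
use sp_runtime::traits::{BlakeTwo256, Hash};

// Ordered trie root over encoded extrinsics; `version` now comes from
// `RuntimeVersion::extrinsics_root_state_version()` instead of a fixed V0.
fn extrinsics_root_sketch(encoded: Vec<Vec<u8>>, version: StateVersion) -> sp_core::H256 {
	BlakeTwo256::ordered_trie_root(encoded, version)
}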
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 72d024e9a878..b03231569113 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -125,7 +125,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The BABE epoch configuration at genesis. diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index b02c2d8c671e..d0c1cd89de32 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -175,7 +175,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, - state_version: 1, + system_version: 1, }; /// The BABE epoch configuration at genesis. diff --git a/prdoc/pr_4257.prdoc b/prdoc/pr_4257.prdoc new file mode 100644 index 000000000000..860b85a4888e --- /dev/null +++ b/prdoc/pr_4257.prdoc @@ -0,0 +1,76 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Rename `state_version` in `RuntimeVersion` to `system_version`. + +doc: + - audience: Runtime Dev + description: | + This PR renames `state_version` in `RuntimeVersion` to `system_version`. `system_version=2` signifies + that extrinsic root derivation uses `StateVersion::V1`. + + - audience: Runtime User + description: | + `RuntimeVersion`'s `state_version` is renamed to `system_version`. Applications using that type and its field + must update their code to reflect the changes. For easier migration, serde serialization produces both the new + `systemVersion` and the old `stateVersion` fields, and deserialization supports `stateVersion` as an alias too.
+ +crates: + - name: frame-system + bump: major + - name: sp-api + bump: none + - name: sp-version + bump: major + - name: sp-storage + bump: minor + - name: sp-version-proc-macro + bump: minor + - name: sc-block-builder + bump: major + - name: sc-executor + bump: major + - name: sc-rpc + bump: none + - name: sc-rpc-spec-v2 + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-client-pov-recovery + bump: none + - name: cumulus-client-network + bump: none + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major + - name: collectives-westend-runtime + bump: major + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major + - name: people-rococo-runtime + bump: major + - name: people-westend-runtime + bump: major + - name: penpal-runtime + bump: major + - name: contracts-rococo-runtime + bump: major + - name: glutton-westend-runtime + bump: major + - name: seedling-runtime + bump: major + - name: shell-runtime + bump: major + - name: rococo-parachain-runtime + bump: major diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index caebd63408db..c8409078af57 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -171,7 +171,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 1, + system_version: 1, }; /// The BABE epoch configuration at genesis. diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 2f22cd42591f..d02d0e321805 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -320,7 +320,7 @@ where header.extrinsics_root().clone(), HashingFor::<Block>::ordered_trie_root( self.extrinsics.iter().map(Encode::encode).collect(), - sp_runtime::StateVersion::V0, + self.api.version(self.parent_hash)?.extrinsics_root_state_version(), ), ); diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index be8344ba79b7..77dfc09c8807 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -480,7 +480,7 @@ mod tests { let version = decode_version(&old_runtime_version.encode()).unwrap(); assert_eq!(1, version.transaction_version); - assert_eq!(0, version.state_version); + assert_eq!(0, version.system_version); } #[test] @@ -507,12 +507,12 @@ mod tests { impl_version: 1, apis: create_apis_vec!([(<dyn Core<Block>>::ID, 3)]), transaction_version: 3, - state_version: 4, + system_version: 4, }; let version = decode_version(&old_runtime_version.encode()).unwrap(); assert_eq!(3, version.transaction_version); - assert_eq!(0, version.state_version); + assert_eq!(0, version.system_version); let old_runtime_version = RuntimeVersion { spec_name: "test".into(), @@ -522,12 +522,12 @@ mod tests { impl_version: 1, apis: create_apis_vec!([(<dyn Core<Block>>::ID, 4)]), transaction_version: 3, - state_version: 4, + system_version: 4, }; let version = decode_version(&old_runtime_version.encode()).unwrap(); assert_eq!(3, version.transaction_version); - assert_eq!(4, version.state_version); + assert_eq!(4, version.system_version); } #[test] @@ -545,7 +545,7 @@ mod tests { impl_version: 100, apis:
create_apis_vec!([(<dyn Core<Block>>::ID, 4)]), transaction_version: 100, - state_version: 1, + system_version: 1, }; let embedded = sp_version::embed::embed_runtime_version(&wasm, runtime_version.clone()) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index a638a9c7ec54..30a01b93b315 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -346,7 +346,7 @@ async fn follow_with_runtime() { [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"systemVersion\":1}"; let runtime: RuntimeVersion = serde_json::from_str(runtime_str).unwrap(); diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index eef795070343..6b711f2425e9 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -476,7 +476,8 @@ async fn should_return_runtime_version() { [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"systemVersion\":1,\ + \"stateVersion\":1}"; let runtime_version = api.runtime_version(None.into()).unwrap(); let serialized = serde_json::to_string(&runtime_version).unwrap(); diff --git a/substrate/frame/support/test/compile_pass/src/lib.rs b/substrate/frame/support/test/compile_pass/src/lib.rs index 37af683fbc7f..677ef4e94c89 100644 --- a/substrate/frame/support/test/compile_pass/src/lib.rs +++ b/substrate/frame/support/test/compile_pass/src/lib.rs @@ -40,7 +40,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: sp_version::create_apis_vec!([]), transaction_version: 0, - state_version: 0, + system_version: 0, }; pub type Signature = sr25519::Signature; diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index abacfa7b62cc..662b7f1a94bf 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -175,6 +175,7 @@ pub use extensions::{ pub use extensions::check_mortality::CheckMortality as CheckEra; pub use frame_support::dispatch::RawOrigin; use frame_support::traits::{PostInherents, PostTransactions, PreInherents}; +use sp_core::storage::StateVersion; pub use weights::WeightInfo; const LOG_TARGET: &str = "runtime::system"; @@ -182,17 +183,20 @@ const LOG_TARGET: &str = "runtime::system"; /// Compute the trie root of a list of extrinsics. /// /// The merkle proof is using the same trie as runtime state with -/// `state_version` 0. -pub fn extrinsics_root<H: Hash, E: codec::Encode>(extrinsics: &[E]) -> H::Output { - extrinsics_data_root::<H>(extrinsics.iter().map(codec::Encode::encode).collect()) +/// `state_version` 0 or 1.
+pub fn extrinsics_root<H: Hash, E: codec::Encode>( + extrinsics: &[E], + state_version: StateVersion, +) -> H::Output { + extrinsics_data_root::<H>(extrinsics.iter().map(codec::Encode::encode).collect(), state_version) } /// Compute the trie root of a list of extrinsics. /// /// The merkle proof is using the same trie as runtime state with -/// `state_version` 0. -pub fn extrinsics_data_root<H: Hash>(xts: Vec<Vec<u8>>) -> H::Output { - H::ordered_trie_root(xts, sp_core::storage::StateVersion::V0) +/// `state_version` 0 or 1. +pub fn extrinsics_data_root<H: Hash>(xts: Vec<Vec<u8>>, state_version: StateVersion) -> H::Output { + H::ordered_trie_root(xts, state_version) } /// An object to track the currently used extrinsic weight in a block. @@ -1847,7 +1851,9 @@ impl<T: Config> Pallet<T> { let extrinsics = (0..ExtrinsicCount::<T>::take().unwrap_or_default()) .map(ExtrinsicData::<T>::take) .collect(); - let extrinsics_root = extrinsics_data_root::<T::Hashing>(extrinsics); + let extrinsics_root_state_version = T::Version::get().extrinsics_root_state_version(); + let extrinsics_root = + extrinsics_data_root::<T::Hashing>(extrinsics, extrinsics_root_state_version); // move block hash pruning window by one block let block_hash_count = T::BlockHashCount::get(); diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs index fff848b3b0e5..f43ffe3c87ee 100644 --- a/substrate/frame/system/src/mock.rs +++ b/substrate/frame/system/src/mock.rs @@ -40,7 +40,7 @@ parameter_types! { impl_version: 1, apis: sp_version::create_apis_vec!([]), transaction_version: 1, - state_version: 1, + system_version: 1, }; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index b2cd017e1e20..534ba1e863fc 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -789,7 +789,10 @@ fn extrinsics_root_is_calculated_correctly() { System::note_finished_extrinsics(); let header = System::finalize(); - let ext_root = extrinsics_data_root::<BlakeTwo256>(vec![vec![1], vec![2]]); + let ext_root = extrinsics_data_root::<BlakeTwo256>( + vec![vec![1], vec![2]], + sp_core::storage::StateVersion::V0, + ); assert_eq!(ext_root, *header.extrinsics_root()); }); } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index d254bf20601f..4b5c35562bde 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -341,7 +341,7 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// // Here we are exposing the runtime api versions.
/// apis: RUNTIME_API_VERSIONS, /// transaction_version: 1, -/// state_version: 1, +/// system_version: 1, /// }; /// /// # fn main() {} diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index 3b9afae4ca07..4b25f85fba68 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -444,6 +444,7 @@ impl TryFrom<u8> for StateVersion { match val { 0 => Ok(StateVersion::V0), 1 => Ok(StateVersion::V1), + 2 => Ok(StateVersion::V1), _ => Err(()), } } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index 35c49360b7f8..a3be654547d9 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -20,6 +20,7 @@ proc-macro = true [dependencies] codec = { features = ["derive"], workspace = true, default-features = true } +proc-macro-warning = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } diff --git a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs index 3671d4aff6bb..b4f749c90f59 100644 --- a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -17,6 +17,7 @@ use codec::Encode; use proc_macro2::{Span, TokenStream}; +use proc_macro_warning::Warning; use quote::quote; use syn::{ parse::{Error, Result}, @@ -37,13 +38,19 @@ pub fn decl_runtime_version_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { } fn decl_runtime_version_impl_inner(item: ItemConst) -> Result<TokenStream> { - let runtime_version = ParseRuntimeVersion::parse_expr(&item.expr)?.build(item.expr.span())?; + let (parsed_runtime_version, warnings) = ParseRuntimeVersion::parse_expr(&item.expr)?; + let runtime_version = parsed_runtime_version.build(item.expr.span())?; let link_section = generate_emit_link_section_decl(&runtime_version.encode(), "runtime_version"); Ok(quote! { #item #link_section + const _:() = { + #( + #warnings + )* + }; }) } @@ -63,7 +70,7 @@ struct RuntimeVersion { impl_version: u32, apis: u8, transaction_version: u32, - state_version: u8, + system_version: u8, } #[derive(Default, Debug)] @@ -74,11 +81,11 @@ struct ParseRuntimeVersion { spec_version: Option<u32>, impl_version: Option<u32>, transaction_version: Option<u32>, - state_version: Option<u8>, + system_version: Option<u8>, } impl ParseRuntimeVersion { - fn parse_expr(init_expr: &Expr) -> Result<ParseRuntimeVersion> { + fn parse_expr(init_expr: &Expr) -> Result<(ParseRuntimeVersion, Vec<Warning>)> { let init_expr = match init_expr { Expr::Struct(ref e) => e, _ => @@ -86,13 +93,14 @@ impl ParseRuntimeVersion { }; let mut parsed = ParseRuntimeVersion::default(); + let mut warnings = vec![]; for field_value in init_expr.fields.iter() { - parsed.parse_field_value(field_value)?; + warnings.append(&mut parsed.parse_field_value(field_value)?)
} - Ok(parsed) + Ok((parsed, warnings)) } - fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<()> { + fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<Vec<Warning>> { let field_name = match field_value.member { syn::Member::Named(ref ident) => ident, syn::Member::Unnamed(_) => @@ -112,6 +120,7 @@ impl ParseRuntimeVersion { } } + let mut warnings = vec![]; if field_name == "spec_name" { parse_once(&mut self.spec_name, field_value, Self::parse_str_literal)?; } else if field_name == "impl_name" { @@ -125,7 +134,16 @@ impl ParseRuntimeVersion { } else if field_name == "transaction_version" { parse_once(&mut self.transaction_version, field_value, Self::parse_num_literal)?; } else if field_name == "state_version" { - parse_once(&mut self.state_version, field_value, Self::parse_num_literal_u8)?; + let warning = Warning::new_deprecated("RuntimeVersion") + .old("state_version") + .new("system_version") + .help_link("https://github.com/paritytech/polkadot-sdk/pull/4257") + .span(field_name.span()) + .build_or_panic(); + warnings.push(warning); + parse_once(&mut self.system_version, field_value, Self::parse_num_literal_u8)?; + } else if field_name == "system_version" { + parse_once(&mut self.system_version, field_value, Self::parse_num_literal_u8)?; } else if field_name == "apis" { // Intentionally ignored // @@ -136,7 +154,7 @@ impl ParseRuntimeVersion { return Err(Error::new(field_name.span(), "unknown field")) } - Ok(()) + Ok(warnings) } fn parse_num_literal(expr: &Expr) -> Result<u64> { @@ -198,7 +216,7 @@ impl ParseRuntimeVersion { spec_version, impl_version, transaction_version, - state_version, + system_version, } = self; Ok(RuntimeVersion { @@ -208,7 +226,7 @@ impl ParseRuntimeVersion { spec_version: required!(spec_version), impl_version: required!(impl_version), transaction_version: required!(transaction_version), - state_version: required!(state_version), + system_version: required!(system_version), apis: 0, }) } @@ -240,7 +258,7 @@ mod tests { impl_version: 1, apis: 0, transaction_version: 2, - state_version: 1, + system_version: 1, } .encode(); @@ -255,7 +273,7 @@ mod tests { impl_version: 1, apis: Cow::Owned(vec![]), transaction_version: 2, - state_version: 1, + system_version: 1, }, ); } diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs index 55dea364eef4..a9f1c2373069 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -35,12 +35,12 @@ extern crate alloc; +#[cfg(any(feature = "std", feature = "serde"))] +use alloc::fmt; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use std::collections::HashSet; -#[cfg(feature = "std")] -use std::fmt; #[doc(hidden)] pub use alloc::borrow::Cow; @@ -83,7 +83,7 @@ pub mod embed; /// impl_version: 1, /// apis: RUNTIME_API_VERSIONS, /// transaction_version: 2, -/// state_version: 1, +/// system_version: 1, /// }; /// /// # const RUNTIME_API_VERSIONS: sp_version::ApisVec = sp_version::create_apis_vec!([]); @@ -160,8 +160,6 @@ macro_rules! create_apis_vec { /// `authoring_version`, absolutely not `impl_version` since they change the semantics of the /// runtime. #[derive(Clone, PartialEq, Eq, Encode, Default, sp_runtime::RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] pub struct RuntimeVersion { /// Identifies the different Substrate runtimes. There'll be at least polkadot and node.
/// A different on-chain spec_name to that of the native runtime would normally result @@ -200,13 +198,6 @@ pub struct RuntimeVersion { pub impl_version: u32, /// List of supported API "features" along with their versions. - #[cfg_attr( - feature = "serde", - serde( - serialize_with = "apis_serialize::serialize", - deserialize_with = "apis_serialize::deserialize", - ) - )] pub apis: ApisVec, /// All existing calls (dispatchables) are fully compatible when this number doesn't change. If @@ -230,9 +221,406 @@ pub struct RuntimeVersion { /// This number should never decrease. pub transaction_version: u32, - /// Version of the state implementation used by this runtime. + /// Version of the system implementation used by this runtime. /// Use of an incorrect version is consensus breaking. - pub state_version: u8, + pub system_version: u8, +} + +// Manual implementation in order to sprinkle `stateVersion` at the end for migration purposes +// after the field was renamed from `state_version` to `system_version` +#[cfg(feature = "serde")] +impl serde::Serialize for RuntimeVersion { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + + let mut state = serializer.serialize_struct("RuntimeVersion", 9)?; + state.serialize_field("specName", &self.spec_name)?; + state.serialize_field("implName", &self.impl_name)?; + state.serialize_field("authoringVersion", &self.authoring_version)?; + state.serialize_field("specVersion", &self.spec_version)?; + state.serialize_field("implVersion", &self.impl_version)?; + state.serialize_field("apis", { + struct SerializeWith<'a>(&'a ApisVec); + + impl<'a> serde::Serialize for SerializeWith<'a> { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + apis_serialize::serialize(self.0, serializer) + } + } + + &SerializeWith(&self.apis) + })?; + state.serialize_field("transactionVersion", &self.transaction_version)?; + state.serialize_field("systemVersion", &self.system_version)?; + state.serialize_field("stateVersion", &self.system_version)?; + state.end() + } +} + +// Manual implementation in order to allow both old `stateVersion` and new `systemVersion` to be +// present at the same time +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for RuntimeVersion { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + use core::marker::PhantomData; + + enum Field { + SpecName, + ImplName, + AuthoringVersion, + SpecVersion, + ImplVersion, + Apis, + TransactionVersion, + SystemVersion, + Ignore, + } + + struct FieldVisitor; + + impl<'de> serde::de::Visitor<'de> for FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("field identifier") + } + + fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + match value { + 0 => Ok(Field::SpecName), + 1 => Ok(Field::ImplName), + 2 => Ok(Field::AuthoringVersion), + 3 => Ok(Field::SpecVersion), + 4 => Ok(Field::ImplVersion), + 5 => Ok(Field::Apis), + 6 => Ok(Field::TransactionVersion), + 7 => Ok(Field::SystemVersion), + _ => Ok(Field::Ignore), + } + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + match value { + "specName" => Ok(Field::SpecName), + "implName" => Ok(Field::ImplName), + "authoringVersion" => Ok(Field::AuthoringVersion), + "specVersion" => Ok(Field::SpecVersion), + "implVersion" => Ok(Field::ImplVersion), + "apis" => Ok(Field::Apis),
"transactionVersion" => Ok(Field::TransactionVersion), + "systemVersion" | "stateVersion" => Ok(Field::SystemVersion), + _ => Ok(Field::Ignore), + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: serde::de::Error, + { + match value { + b"specName" => Ok(Field::SpecName), + b"implName" => Ok(Field::ImplName), + b"authoringVersion" => Ok(Field::AuthoringVersion), + b"specVersion" => Ok(Field::SpecVersion), + b"implVersion" => Ok(Field::ImplVersion), + b"apis" => Ok(Field::Apis), + b"transactionVersion" => Ok(Field::TransactionVersion), + b"systemVersion" | b"stateVersion" => Ok(Field::SystemVersion), + _ => Ok(Field::Ignore), + } + } + } + + impl<'de> serde::Deserialize<'de> for Field { + #[inline] + fn deserialize(deserializer: E) -> Result + where + E: serde::Deserializer<'de>, + { + deserializer.deserialize_identifier(FieldVisitor) + } + } + + struct Visitor<'de> { + lifetime: PhantomData<&'de ()>, + } + impl<'de> serde::de::Visitor<'de> for Visitor<'de> { + type Value = RuntimeVersion; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("struct RuntimeVersion") + } + + #[inline] + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let spec_name = match seq.next_element()? { + Some(spec_name) => spec_name, + None => + return Err(serde::de::Error::invalid_length( + 0usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + let impl_name = match seq.next_element()? { + Some(impl_name) => impl_name, + None => + return Err(serde::de::Error::invalid_length( + 1usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + let authoring_version = match seq.next_element()? { + Some(authoring_version) => authoring_version, + None => + return Err(serde::de::Error::invalid_length( + 2usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + let spec_version = match seq.next_element()? { + Some(spec_version) => spec_version, + None => + return Err(serde::de::Error::invalid_length( + 3usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + let impl_version = match seq.next_element()? { + Some(impl_version) => impl_version, + None => + return Err(serde::de::Error::invalid_length( + 4usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + let apis = match { + struct DeserializeWith<'de> { + value: ApisVec, + + phantom: PhantomData, + lifetime: PhantomData<&'de ()>, + } + impl<'de> serde::Deserialize<'de> for DeserializeWith<'de> { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + Ok(DeserializeWith { + value: apis_serialize::deserialize(deserializer)?, + phantom: PhantomData, + lifetime: PhantomData, + }) + } + } + seq.next_element::>()?.map(|wrap| wrap.value) + } { + Some(apis) => apis, + None => + return Err(serde::de::Error::invalid_length( + 5usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + let transaction_version = match seq.next_element()? { + Some(transaction_version) => transaction_version, + None => + return Err(serde::de::Error::invalid_length( + 6usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + let system_version = match seq.next_element()? 
{ + Some(system_version) => system_version, + None => + return Err(serde::de::Error::invalid_length( + 7usize, + &"struct RuntimeVersion with 8 elements", + )), + }; + Ok(RuntimeVersion { + spec_name, + impl_name, + authoring_version, + spec_version, + impl_version, + apis, + transaction_version, + system_version, + }) + } + + #[inline] + fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> + where + A: serde::de::MapAccess<'de>, + { + let mut spec_name: Option<RuntimeString> = None; + let mut impl_name: Option<RuntimeString> = None; + let mut authoring_version: Option<u32> = None; + let mut spec_version: Option<u32> = None; + let mut impl_version: Option<u32> = None; + let mut apis: Option<ApisVec> = None; + let mut transaction_version: Option<u32> = None; + let mut system_version: Option<u8> = None; + + while let Some(key) = map.next_key()? { + match key { + Field::SpecName => { + if spec_name.is_some() { + return Err(<A::Error as serde::de::Error>::duplicate_field( + "specName", + )); + } + spec_name = Some(map.next_value()?); + }, + Field::ImplName => { + if impl_name.is_some() { + return Err(<A::Error as serde::de::Error>::duplicate_field( + "implName", + )); + } + impl_name = Some(map.next_value()?); + }, + Field::AuthoringVersion => { + if authoring_version.is_some() { + return Err(<A::Error as serde::de::Error>::duplicate_field( + "authoringVersion", + )); + } + authoring_version = Some(map.next_value()?); + }, + Field::SpecVersion => { + if spec_version.is_some() { + return Err(<A::Error as serde::de::Error>::duplicate_field( + "specVersion", + )); + } + spec_version = Some(map.next_value()?); + }, + Field::ImplVersion => { + if impl_version.is_some() { + return Err(<A::Error as serde::de::Error>::duplicate_field( + "implVersion", + )); + } + impl_version = Some(map.next_value()?); + }, + Field::Apis => { + if apis.is_some() { + return Err(<A::Error as serde::de::Error>::duplicate_field("apis")); + } + apis = Some({ + struct DeserializeWith<'de> { + value: ApisVec, + lifetime: PhantomData<&'de ()>, + } + impl<'de> serde::Deserialize<'de> for DeserializeWith<'de> { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + Ok(DeserializeWith { + value: apis_serialize::deserialize(deserializer)?, + lifetime: PhantomData, + }) + } + } + + map.next_value::<DeserializeWith<'de>>()?.value + }); + }, + Field::TransactionVersion => { + if transaction_version.is_some() { + return Err(<A::Error as serde::de::Error>::duplicate_field( + "transactionVersion", + )); + } + transaction_version = Some(map.next_value()?); + }, + Field::SystemVersion => + if let Some(system_version) = system_version { + let new_value = map.next_value::<u8>()?; + if system_version != new_value { + return Err(<A::Error as serde::de::Error>::custom( + alloc::format!( + r#"Duplicated "stateVersion" and "systemVersion" \ + fields must have the same value, but different values \ + were provided: {system_version} vs {new_value}"# + ), + )); + } + } else { + system_version = Some(map.next_value()?); + }, + _ => { + let _ = map.next_value::<serde::de::IgnoredAny>()?; + }, + } + } + let spec_name = spec_name + .ok_or_else(|| <A::Error as serde::de::Error>::missing_field("specName"))?; + let impl_name = impl_name + .ok_or_else(|| <A::Error as serde::de::Error>::missing_field("implName"))?; + let authoring_version = authoring_version.ok_or_else(|| { + <A::Error as serde::de::Error>::missing_field("authoringVersion") + })?; + let spec_version = spec_version + .ok_or_else(|| <A::Error as serde::de::Error>::missing_field("specVersion"))?; + let impl_version = impl_version + .ok_or_else(|| <A::Error as serde::de::Error>::missing_field("implVersion"))?; + let apis = + apis.ok_or_else(|| <A::Error as serde::de::Error>::missing_field("apis"))?; + let transaction_version = transaction_version.ok_or_else(|| { + <A::Error as serde::de::Error>::missing_field("transactionVersion") + })?; + let system_version = system_version.ok_or_else(|| { + <A::Error as serde::de::Error>::missing_field("systemVersion") + })?; + Ok(RuntimeVersion { + spec_name, + impl_name, + authoring_version, + spec_version, +
impl_version, + apis, + transaction_version, + system_version, + }) + } + } + + const FIELDS: &[&str] = &[ + "specName", + "implName", + "authoringVersion", + "specVersion", + "implVersion", + "apis", + "transactionVersion", + "stateVersion", + "systemVersion", + ]; + + deserializer.deserialize_struct("RuntimeVersion", FIELDS, Visitor { lifetime: PhantomData }) + } } impl RuntimeVersion { @@ -257,7 +645,7 @@ impl RuntimeVersion { if core_version.is_some() { core_version } else { core_version_from_apis(&apis) }; let transaction_version = if core_version.map(|v| v >= 3).unwrap_or(false) { Decode::decode(input)? } else { 1 }; - let state_version = + let system_version = if core_version.map(|v| v >= 4).unwrap_or(false) { Decode::decode(input)? } else { 0 }; Ok(RuntimeVersion { spec_name, @@ -267,7 +655,7 @@ impl RuntimeVersion { impl_version, apis, transaction_version, - state_version, + system_version, }) } } @@ -334,7 +722,17 @@ impl RuntimeVersion { /// Otherwise, V1 trie version will be use. pub fn state_version(&self) -> StateVersion { // If version > than 1, keep using latest version. - self.state_version.try_into().unwrap_or(StateVersion::V1) + self.system_version.try_into().unwrap_or(StateVersion::V1) + } + + /// Returns the state version to use for Extrinsics root. + pub fn extrinsics_root_state_version(&self) -> StateVersion { + match self.system_version { + // for system version 0 and 1, return V0 + 0 | 1 => StateVersion::V0, + // anything above 1, return V1 + _ => StateVersion::V1, + } } } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 514f3bcba204..840081003b84 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -119,7 +119,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 2, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; fn version() -> RuntimeVersion { diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index 474d9ddfb9e8..cce13c48af71 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -46,7 +46,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; /// The version information used to identify this runtime when compiled natively. 
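A hedged, test-style sketch of the serde compatibility implemented above (assuming `serde_json` and the crate's `serde` feature are available): serialization emits both the new and the legacy key, and deserialization accepts either:

use sp_version::RuntimeVersion;

fn serde_migration_sketch() {
	let version = RuntimeVersion { system_version: 2, ..Default::default() };
	let json = serde_json::to_value(&version).expect("serializes");
	// Both keys are emitted with the same value for migration purposes.
	assert_eq!(json["systemVersion"], 2);
	assert_eq!(json["stateVersion"], 2);

	// The legacy key alone still deserializes into `system_version`.
	let legacy = r#"{"specName":"","implName":"","authoringVersion":0,"specVersion":0,"implVersion":0,"apis":[],"transactionVersion":0,"stateVersion":1}"#;
	let parsed: RuntimeVersion = serde_json::from_str(legacy).expect("legacy key accepted");
	assert_eq!(parsed.system_version, 1);
}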
diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 83ae15700a94..ccec648ce4c1 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -172,7 +172,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: apis::RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; #[docify::export] diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs index 6cbfbb879602..ce38c65479e5 100644 --- a/templates/solochain/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -71,7 +71,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: apis::RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: 1, + system_version: 1, }; mod block_times { From 7c90f51dfd541db6df057d3c0ff787e92104c221 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 06:15:10 +0000 Subject: [PATCH 46/66] Bump the ci_dependencies group with 2 updates (#5637) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps the ci_dependencies group with 2 updates: [actions/checkout](https://github.com/actions/checkout) and [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request). Updates `actions/checkout` from 2 to 4
Release notes

Sourced from actions/checkout's releases.

v4.0.0: Full Changelog: https://github.com/actions/checkout/compare/v3...v4.0.0
v3.6.0: Full Changelog: https://github.com/actions/checkout/compare/v3.5.3...v3.6.0
v3.5.3: Full Changelog: https://github.com/actions/checkout/compare/v3...v3.5.3
v3.5.2: Full Changelog: https://github.com/actions/checkout/compare/v3.5.1...v3.5.2
v3.5.1

... (truncated)
Updates `peter-evans/create-pull-request` from 6.1.0 to 7.0.1
Release notes

Sourced from peter-evans/create-pull-request's releases.
Create Pull Request v7.0.1

⚙️ Fixes an issue affecting one particular use case where the action fails on diff --stat with fatal: ambiguous argument.

Full Changelog: https://github.com/peter-evans/create-pull-request/compare/v7.0.0...v7.0.1
Create Pull Request v7.0.0

:sparkles: Now supports commit signing with bot-generated tokens! See "What's new" below. :writing_hand::robot:

Behaviour changes

  • Action input git-token has been renamed branch-token, to be more clear about its purpose. The branch-token is the token that the action will use to create and update the branch.
  • The action now handles requests that have been rate-limited by GitHub. Requests hitting a primary rate limit will retry twice, for a total of three attempts. Requests hitting a secondary rate limit will not be retried.
  • The pull-request-operation output now returns none when no operation was executed.
  • Removed deprecated output environment variable PULL_REQUEST_NUMBER. Please use the pull-request-number action output instead.

What's new

  • The action can now sign commits as github-actions[bot] when using GITHUB_TOKEN, or your own bot when using GitHub App tokens. See commit signing for details.
  • Action input draft now accepts a new value always-true. This will set the pull request to draft status when the pull request is updated, as well as on creation.
  • A new action input maintainer-can-modify indicates whether maintainers can modify the pull request. The default is true, which retains the existing behaviour of the action.
  • A new output pull-request-commits-verified returns true or false, indicating whether GitHub considers the signature of the branch's commits to be verified.

What's Changed

... (truncated)

Commits
  • 8867c4a fix: handle ambiguous argument failure on diff stat (#3312)
  • 6073f54 build(deps-dev): bump @​typescript-eslint/eslint-plugin (#3291)
  • 6d01b56 build(deps-dev): bump eslint-plugin-import from 2.29.1 to 2.30.0 (#3290)
  • 25cf845 build(deps-dev): bump @​typescript-eslint/parser from 7.17.0 to 7.18.0 (#3289)
  • d87b980 build(deps-dev): bump @​types/node from 18.19.46 to 18.19.48 (#3288)
  • 119d131 build(deps): bump peter-evans/create-pull-request from 6 to 7 (#3283)
  • 73e6230 docs: update readme
  • c0348e8 ci: add v7 to workflow
  • 4320041 feat: signed commits (v7) (#3057)
  • 0c2a66f build(deps-dev): bump ts-jest from 29.2.4 to 29.2.5 (#3256)
  • Additional commits viewable in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check-licenses.yml | 2 +- .github/workflows/check-links.yml | 2 +- .github/workflows/check-prdoc.yml | 2 +- .github/workflows/check-semver.yml | 2 +- .github/workflows/checks-quick.yml | 16 ++++++++-------- .github/workflows/checks.yml | 6 +++--- .github/workflows/misc-sync-templates.yml | 4 ++-- .github/workflows/publish-check-crates.yml | 2 +- .github/workflows/publish-claim-crates.yml | 2 +- .github/workflows/release-10_rc-automation.yml | 2 +- .../release-30_publish_release_draft.yml | 6 +++--- .github/workflows/release-50_publish-docker.yml | 8 ++++---- .github/workflows/release-branchoff-stable.yml | 4 ++-- .github/workflows/release-check-runtimes.yml | 4 ++-- .github/workflows/release-clobber-stable.yml | 2 +- .github/workflows/release-srtool.yml | 4 ++-- .../workflows/tests-linux-stable-coverage.yml | 2 +- .github/workflows/tests-misc.yml | 16 ++++++++-------- 18 files changed, 43 insertions(+), 43 deletions(-) diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index a74986048976..e3fc27f0a94b 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -16,7 +16,7 @@ jobs: NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - uses: actions/setup-node@v4.0.3 with: node-version: "18.x" diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index d10f34e6faef..1060d252fd2a 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -25,7 +25,7 @@ jobs: # This should restore from the most recent one: restore-keys: cache-lychee- - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.0 (22. Sep 2023) + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023) - name: Lychee link checker uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # for v1.9.1 (10. 
Jan 2024) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 6c8f1ed7a300..8f60b9cccf85 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -20,7 +20,7 @@ jobs: if: github.event.pull_request.number != '' steps: - name: Checkout repo - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc #v4.1.7 # we cannot show the version in this step (ie before checking out the repo) # due to https://github.com/paritytech/prdoc/issues/15 - name: Check if PRdoc is required diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 15eb32f4062c..2c06df5a5092 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -18,7 +18,7 @@ jobs: container: image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 with: fetch-depth: 2 diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 96f214e9427b..c936e7c89389 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -35,14 +35,14 @@ jobs: container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Cargo fmt run: cargo +nightly fmt --all -- --check check-dependency-rules: runs-on: ubuntu-latest timeout-minutes: 20 steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: check dependency rules run: | cd substrate/ @@ -54,7 +54,7 @@ jobs: container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: fetch deps run: | # Pull all dependencies eagerly: @@ -70,7 +70,7 @@ jobs: container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: run rust features run: bash .gitlab/rust-features.sh . check-toml-format: @@ -80,7 +80,7 @@ jobs: container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: check toml format run: | taplo format --check --config .config/taplo.toml @@ -89,7 +89,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 20 steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.0 (22. Sep 2023) + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. 
Sep 2023) - name: install python deps run: | sudo apt-get update && sudo apt-get install -y python3-pip python3 @@ -109,7 +109,7 @@ jobs: timeout-minutes: 20 steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Setup Node.js uses: actions/setup-node@v4.0.3 with: @@ -134,7 +134,7 @@ jobs: container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.0 (22. Sep 2023) + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023) - name: install python deps run: pip3 install "cargo-workspace>=1.2.4" toml - name: check umbrella correctness diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 9de879d83676..cba7df517425 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -54,7 +54,7 @@ jobs: RUSTFLAGS: "-D warnings" SKIP_WASM_BUILD: 1 steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script run: | forklift cargo clippy --all-targets --locked --workspace @@ -67,7 +67,7 @@ jobs: container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script run: | forklift cargo check --locked --all --features try-runtime @@ -86,7 +86,7 @@ jobs: container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script run: | cd substrate/primitives/core diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml index c06beb5e98eb..658da4451dc2 100644 --- a/.github/workflows/misc-sync-templates.yml +++ b/.github/workflows/misc-sync-templates.yml @@ -157,7 +157,7 @@ jobs: timeout-minutes: 90 - name: Create PR on failure if: failure() && steps.check-compilation.outcome == 'failure' - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v5 + uses: peter-evans/create-pull-request@8867c4aba1b742c39f8d0ba35429c2dfa4b6cb20 # v5 with: path: "${{ env.template-path }}" token: ${{ steps.app_token.outputs.token }} @@ -167,7 +167,7 @@ jobs: body: "The template has NOT been successfully built and needs to be inspected." 
branch: "update-template/${{ github.event.inputs.stable_release_branch }}" - name: Create PR on success - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v5 + uses: peter-evans/create-pull-request@8867c4aba1b742c39f8d0ba35429c2dfa4b6cb20 # v5 with: path: "${{ env.template-path }}" token: ${{ steps.app_token.outputs.token }} diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml index 9f96b92e0ce7..77653cd43b62 100644 --- a/.github/workflows/publish-check-crates.yml +++ b/.github/workflows/publish-check-crates.yml @@ -12,7 +12,7 @@ jobs: check-publish: runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Rust Cache uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml index bee709a12076..77f04861bfff 100644 --- a/.github/workflows/publish-claim-crates.yml +++ b/.github/workflows/publish-claim-crates.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest environment: master steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Rust Cache uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-10_rc-automation.yml index 2d91850b82c1..195c14dbd5ab 100644 --- a/.github/workflows/release-10_rc-automation.yml +++ b/.github/workflows/release-10_rc-automation.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 with: fetch-depth: 0 diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 4343dbf915a9..dd6a111d67e8 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -36,7 +36,7 @@ jobs: binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.0.0 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - name: Install protobuf-compiler run: | @@ -63,7 +63,7 @@ jobs: asset_upload_url: ${{ steps.create-release.outputs.upload_url }} steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.0.0 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -134,7 +134,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.0.0 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index c5d214ec68ab..72e01a4833e2 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -86,7 
+86,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Validate inputs id: validate_inputs @@ -111,7 +111,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 #TODO: this step will be needed when automated triggering will work #this step runs only if the workflow is triggered automatically when new release is published @@ -159,7 +159,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -295,7 +295,7 @@ jobs: environment: release steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-branchoff-stable.yml index c236a66a9fae..c4c50f5398e8 100644 --- a/.github/workflows/release-branchoff-stable.yml +++ b/.github/workflows/release-branchoff-stable.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Validate inputs id: validate_inputs @@ -60,7 +60,7 @@ jobs: pip install git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69 - name: Checkout sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 with: ref: master diff --git a/.github/workflows/release-check-runtimes.yml b/.github/workflows/release-check-runtimes.yml index 930b8da772d0..6666c115562f 100644 --- a/.github/workflows/release-check-runtimes.yml +++ b/.github/workflows/release-check-runtimes.yml @@ -35,7 +35,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the repo - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Get list id: get-list @@ -56,7 +56,7 @@ jobs: steps: - name: Checkout the repo - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Fetch release artifacts based on release id env: diff --git a/.github/workflows/release-clobber-stable.yml b/.github/workflows/release-clobber-stable.yml index 50c20563b434..0d2ce78ab781 100644 --- a/.github/workflows/release-clobber-stable.yml +++ b/.github/workflows/release-clobber-stable.yml @@ -24,7 +24,7 @@ jobs: AUDITED: audited steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Prechecks run: | diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 262203f05005..83119dd4ed24 100644 --- a/.github/workflows/release-srtool.yml +++ 
b/.github/workflows/release-srtool.yml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.0.0 + uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 with: fetch-depth: 0 @@ -69,7 +69,7 @@ jobs: matrix: ${{ fromJSON(needs.find-runtimes.outputs.runtime) }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.0.0 + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 with: fetch-depth: 0 diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml index ddf0642a4043..4c0a2629e418 100644 --- a/.github/workflows/tests-linux-stable-coverage.yml +++ b/.github/workflows/tests-linux-stable-coverage.yml @@ -137,7 +137,7 @@ jobs: needs: [upload-reports] if: github.event_name == 'pull_request' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: actions-ecosystem/action-remove-labels@v1 with: labels: GHA-coverage \ No newline at end of file diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 8e8f5770e92d..97ad86e3998e 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -60,7 +60,7 @@ jobs: RUST_BACKTRACE: 1 steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4 - name: script run: | @@ -185,7 +185,7 @@ jobs: needs: [set-image, cargo-check-benches] steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4 - name: Download artifact (master run) uses: actions/download-artifact@v4.1.8 @@ -225,7 +225,7 @@ jobs: image: ${{ needs.set-image.outputs.IMAGE }} steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4 - name: Run tests id: tests @@ -257,7 +257,7 @@ jobs: image: ${{ needs.set-image.outputs.IMAGE }} steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4 - name: script run: | @@ -272,7 +272,7 @@ jobs: image: ${{ needs.set-image.outputs.IMAGE }} steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4 - name: script run: | @@ -301,7 +301,7 @@ jobs: --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4 - name: Run honggfuzz run: | @@ -332,7 +332,7 @@ jobs: index: [1, 2, 3, 4, 5, 6, 7] # 7 parallel jobs steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4 - name: Check Rust run: | @@ -359,7 +359,7 @@ jobs: # index: [ 1,2,3,4,5,6,7,8,9,10 ] # 10 parallel jobs # steps: # - name: Checkout - # uses: actions/checkout@v4.1.7 + # uses: actions/checkout@v4 # - name: Install dependencies # uses: ./.github/actions/set-up-mac From 8236718e961cb357d46368946f0544ec8ce6718e Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Tue, 10 Sep 2024 17:07:03 +0800 Subject: [PATCH 47/66] Fix edge case where state sync is not triggered (#5635) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR addresses an issue where state sync may fail to start if the conditions required for its initiation are not met when a finalized block notification is received. `pending_state_sync_attempt` is introduced to trigger the state sync later when the conditions are satisfied. 
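In short, the fix defers the attempt instead of dropping it. Below is a minimal, self-contained Rust sketch of that pattern (simplified stand-in types and made-up numbers; the real implementation lives in `ChainSync` in the diff further down and additionally gates the attempt on the median best block of connected peers):

// Sketch only: `u64` stands in for the real hash/number types.
struct Sync {
    peers: Vec<u64>,
    queue_blocks: Vec<u64>,
    state_sync_started: bool,
    pending_state_sync_attempt: Option<(u64, u64, bool)>, // (hash, number, skip_proofs)
}

impl Sync {
    fn on_block_finalized(&mut self, hash: u64, number: u64, skip_proofs: bool) {
        if self.state_sync_started {
            return;
        }
        if !self.peers.is_empty() && self.queue_blocks.is_empty() {
            self.attempt_state_sync(hash, number, skip_proofs);
        } else {
            // Conditions not met yet: remember the attempt instead of dropping it.
            self.pending_state_sync_attempt = Some((hash, number, skip_proofs));
        }
    }

    fn actions(&mut self) {
        // Polled regularly by the syncing engine; retries once the conditions hold.
        if !self.peers.is_empty() && self.queue_blocks.is_empty() {
            if let Some((hash, number, skip_proofs)) = self.pending_state_sync_attempt.take() {
                self.attempt_state_sync(hash, number, skip_proofs);
            }
        }
    }

    fn attempt_state_sync(&mut self, _hash: u64, number: u64, _skip_proofs: bool) {
        println!("starting state sync for #{number}");
        self.state_sync_started = true;
    }
}

fn main() {
    let mut sync = Sync {
        peers: vec![1],
        queue_blocks: vec![42], // import queue still busy when finality arrives
        state_sync_started: false,
        pending_state_sync_attempt: None,
    };
    sync.on_block_finalized(0xab, 100, false); // deferred, not lost
    sync.queue_blocks.clear(); // queued imports drain
    sync.actions(); // the deferred attempt fires now
}

The key design choice is `Option::take()`: the stored attempt is consumed exactly once, so a started state sync cannot be retriggered by a stale pending entry.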
This issue was spotted when I worked on #5406, specifically, `queue_blocks` was not empty when the finalized block notification was received, and then the state sync was stalled. cc @dmitry-markin --------- Co-authored-by: Dmitry Markin Co-authored-by: Bastian Köcher --- prdoc/pr_5635.prdoc | 13 ++ .../network/sync/src/strategy/chain_sync.rs | 162 +++++++++++------- 2 files changed, 109 insertions(+), 66 deletions(-) create mode 100644 prdoc/pr_5635.prdoc diff --git a/prdoc/pr_5635.prdoc b/prdoc/pr_5635.prdoc new file mode 100644 index 000000000000..168d65970c95 --- /dev/null +++ b/prdoc/pr_5635.prdoc @@ -0,0 +1,13 @@ +title: Fix edge case where state sync is not triggered + +doc: + - audience: Node Dev + description: | + There is an edge case where the finalized block notification is received, but the conditions required to initiate the + state sync are not fully met. In such cases, state sync would fail to start as expected and remain stalled. + This patch addresses it by storing the pending attempt and trying to start the state sync later when the conditions + are satisfied. + +crates: + - name: sc-network-sync + bump: patch diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index f29ed1b083e8..cca83a5055cb 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -254,6 +254,14 @@ pub struct ChainSync { /// A set of hashes of blocks that are being downloaded or have been /// downloaded and are queued for import. queue_blocks: HashSet, + /// A pending attempt to start the state sync. + /// + /// The initiation of state sync may be deferred in cases where other conditions + /// are not yet met when the finalized block notification is received, such as + /// when `queue_blocks` is not empty or there are no peers. This field holds the + /// necessary information to attempt the state sync at a later point when + /// conditions are satisfied. + pending_state_sync_attempt: Option<(B::Hash, NumberFor, bool)>, /// Fork sync targets. fork_targets: HashMap>, /// A set of peers for which there might be potential block requests @@ -376,6 +384,7 @@ where extra_justifications: ExtraRequests::new("justification", metrics_registry), mode, queue_blocks: Default::default(), + pending_state_sync_attempt: None, fork_targets: Default::default(), allowed_requests: Default::default(), max_parallel_downloads, @@ -497,7 +506,7 @@ where "💔 New peer {} with unknown genesis hash {} ({}).", peer_id, best_hash, best_number, ); - return Err(BadPeer(peer_id, rep::GENESIS_MISMATCH)) + return Err(BadPeer(peer_id, rep::GENESIS_MISMATCH)); } // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have @@ -521,7 +530,7 @@ where state: PeerSyncState::Available, }, ); - return Ok(None) + return Ok(None); } // If we are at genesis, just start downloading. @@ -644,14 +653,14 @@ where if self.is_known(hash) { debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); - return + return; } trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); for peer_id in &peers { if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::AncestorSearch { .. 
} = peer.state { - continue + continue; } if number > peer.best_number { @@ -748,14 +757,14 @@ where blocks } else { debug!(target: LOG_TARGET, "Unexpected gap block response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NO_BLOCK)) + return Err(BadPeer(*peer_id, rep::NO_BLOCK)); } }, PeerSyncState::DownloadingStale(_) => { peer.state = PeerSyncState::Available; if blocks.is_empty() { debug!(target: LOG_TARGET, "Empty block response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NO_BLOCK)) + return Err(BadPeer(*peer_id, rep::NO_BLOCK)); } validate_blocks::(&blocks, peer_id, Some(request))?; blocks @@ -796,14 +805,14 @@ where target: LOG_TARGET, "Invalid response when searching for ancestor from {peer_id}", ); - return Err(BadPeer(*peer_id, rep::UNKNOWN_ANCESTOR)) + return Err(BadPeer(*peer_id, rep::UNKNOWN_ANCESTOR)); }, (_, Err(e)) => { info!( target: LOG_TARGET, "❌ Error answering legitimate blockchain query: {e}", ); - return Err(BadPeer(*peer_id, rep::BLOCKCHAIN_READ_ERROR)) + return Err(BadPeer(*peer_id, rep::BLOCKCHAIN_READ_ERROR)); }, }; if matching_hash.is_some() { @@ -837,7 +846,7 @@ where target: LOG_TARGET, "Ancestry search: genesis mismatch for peer {peer_id}", ); - return Err(BadPeer(*peer_id, rep::GENESIS_MISMATCH)) + return Err(BadPeer(*peer_id, rep::GENESIS_MISMATCH)); } if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) @@ -852,7 +861,7 @@ where peer_id: *peer_id, request, }); - return Ok(()) + return Ok(()); } else { // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. @@ -892,7 +901,7 @@ where .insert(*peer_id); } peer.state = PeerSyncState::Available; - return Ok(()) + return Ok(()); } }, PeerSyncState::Available | @@ -925,7 +934,7 @@ where } } else { // We don't know of this peer, so we also did not request anything from it. - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); }; self.validate_and_queue_blocks(new_blocks, gap); @@ -947,7 +956,7 @@ where target: LOG_TARGET, "💔 Called on_block_justification with a peer ID of an unknown peer", ); - return Ok(()) + return Ok(()); }; self.allowed_requests.add(&peer_id); @@ -964,7 +973,7 @@ where hash, block.hash, ); - return Err(BadPeer(peer_id, rep::BAD_JUSTIFICATION)) + return Err(BadPeer(peer_id, rep::BAD_JUSTIFICATION)); } block @@ -990,7 +999,7 @@ where number, justifications, }); - return Ok(()) + return Ok(()); } } @@ -1013,26 +1022,11 @@ where }); if let ChainSyncMode::LightState { skip_proofs, .. } = &self.mode { - if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { - // Finalized a recent block. 
- let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); - heads.sort(); - let median = heads[heads.len() / 2]; - if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { - if let Ok(Some(header)) = self.client.header(*hash) { - log::debug!( - target: LOG_TARGET, - "Starting state sync for #{number} ({hash})", - ); - self.state_sync = Some(StateSync::new( - self.client.clone(), - header, - None, - None, - *skip_proofs, - )); - self.allowed_requests.set_all(); - } + if self.state_sync.is_none() { + if !self.peers.is_empty() && self.queue_blocks.is_empty() { + self.attempt_state_sync(*hash, number, *skip_proofs); + } else { + self.pending_state_sync_attempt.replace((*hash, number, *skip_proofs)); } } } @@ -1045,6 +1039,35 @@ where } } + fn attempt_state_sync( + &mut self, + finalized_hash: B::Hash, + finalized_number: NumberFor, + skip_proofs: bool, + ) { + let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); + heads.sort(); + let median = heads[heads.len() / 2]; + if finalized_number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { + if let Ok(Some(header)) = self.client.header(finalized_hash) { + log::debug!( + target: LOG_TARGET, + "Starting state sync for #{finalized_number} ({finalized_hash})", + ); + self.state_sync = + Some(StateSync::new(self.client.clone(), header, None, None, skip_proofs)); + self.allowed_requests.set_all(); + } else { + log::error!( + target: LOG_TARGET, + "Failed to start state sync: header for finalized block \ + #{finalized_number} ({finalized_hash}) is not available", + ); + debug_assert!(false); + } + } + } + /// Submit a validated block announcement. /// /// Returns new best hash & best number of the peer if they are updated. @@ -1067,12 +1090,12 @@ where peer } else { error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID {peer_id}"); - return Some((hash, number)) + return Some((hash, number)); }; if let PeerSyncState::AncestorSearch { .. } = peer.state { trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", peer_id); - return None + return None; } let peer_info = is_best.then(|| { @@ -1102,7 +1125,7 @@ where if let Some(target) = self.fork_targets.get_mut(&hash) { target.peers.insert(peer_id); } - return peer_info + return peer_info; } if ancient_parent { @@ -1113,7 +1136,7 @@ where hash, announce.header, ); - return peer_info + return peer_info; } if self.status().state == SyncState::Idle { @@ -1281,7 +1304,7 @@ where for (n, peer) in self.peers.iter_mut() { if let PeerSyncState::AncestorSearch { .. } = peer.state { // Wait for ancestry search to complete first. - continue + continue; } let new_common_number = if peer.best_number >= number { number } else { peer.best_number }; @@ -1401,7 +1424,7 @@ where /// What is the status of the block corresponding to the given hash? fn block_status(&self, hash: &B::Hash) -> Result { if self.queue_blocks.contains(hash) { - return Ok(BlockStatus::Queued) + return Ok(BlockStatus::Queued); } self.client.block_status(*hash) } @@ -1521,12 +1544,12 @@ where /// Get block requests scheduled by sync to be sent out. 
fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { if self.allowed_requests.is_empty() || self.state_sync.is_some() { - return Vec::new() + return Vec::new(); } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { trace!(target: LOG_TARGET, "Too many blocks in the queue."); - return Vec::new() + return Vec::new(); } let is_major_syncing = self.status().state.is_major_syncing(); let attrs = self.required_block_attributes(); @@ -1550,7 +1573,7 @@ where !allowed_requests.contains(&id) || !disconnected_peers.is_peer_available(&id) { - return None + return None; } // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from @@ -1648,17 +1671,17 @@ where /// Get a state request scheduled by sync to be sent out (if any). fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { - return None + return None; } if self.state_sync.is_some() && self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) { // Only one pending state request is allowed. - return None + return None; } if let Some(sync) = &self.state_sync { if sync.is_complete() { - return None + return None; } for (id, peer) in self.peers.iter_mut() { @@ -1670,7 +1693,7 @@ where let request = sync.next_request(); trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) + return Some((*id, OpaqueStateRequest(Box::new(request)))); } } } @@ -1709,7 +1732,7 @@ where sync.import(*response) } else { debug!(target: LOG_TARGET, "Ignored obsolete state response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); }; match import_result { @@ -1765,16 +1788,17 @@ where } for (result, hash) in results { if has_error { - break + break; } has_error |= result.is_err(); match result { - Ok(BlockImportStatus::ImportedKnown(number, peer_id)) => + Ok(BlockImportStatus::ImportedKnown(number, peer_id)) => { if let Some(peer) = peer_id { self.update_peer_common_number(&peer, number); - }, + } + }, Ok(BlockImportStatus::ImportedUnknown(number, aux, peer_id)) => { if aux.clear_justification_requests { trace!( @@ -1882,6 +1906,12 @@ where /// Get pending actions to perform. #[must_use] pub fn actions(&mut self) -> impl Iterator> { + if !self.peers.is_empty() && self.queue_blocks.is_empty() { + if let Some((hash, number, skip_proofs)) = self.pending_state_sync_attempt.take() { + self.attempt_state_sync(hash, number, skip_proofs); + } + } + let block_requests = self .block_requests() .into_iter() @@ -1964,7 +1994,7 @@ fn handle_ancestor_search_state( if block_hash_match && next_distance_to_tip == One::one() { // We found the ancestor in the first step so there is no need to execute binary // search. - return None + return None; } if block_hash_match { let left = curr_block_num; @@ -1983,7 +2013,7 @@ fn handle_ancestor_search_state( }, AncestorSearchState::BinarySearch(mut left, mut right) => { if left >= curr_block_num { - return None + return None; } if block_hash_match { left = curr_block_num; @@ -2014,7 +2044,7 @@ fn peer_block_request( ) -> Option<(Range>, BlockRequest)> { if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. 
- return None + return None; } else if peer.common_number < finalized { trace!( target: LOG_TARGET, @@ -2103,7 +2133,7 @@ fn fork_sync_request( hash, r.number, ); - return false + return false; } if check_block(hash) != BlockStatus::Unknown { trace!( @@ -2112,7 +2142,7 @@ fn fork_sync_request( hash, r.number, ); - return false + return false; } true }); @@ -2121,7 +2151,7 @@ fn fork_sync_request( } for (hash, r) in fork_targets { if !r.peers.contains(&id) { - continue + continue; } // Download the fork only if it is behind or not too far ahead our tip of the chain // Otherwise it should be downloaded in full sync mode. @@ -2148,7 +2178,7 @@ fn fork_sync_request( direction: Direction::Descending, max: Some(count), }, - )) + )); } else { trace!(target: LOG_TARGET, "Fork too far in the future: {:?} (#{})", hash, r.number); } @@ -2167,7 +2197,7 @@ where T: HeaderMetadata + ?Sized, { if base == block { - return Ok(false) + return Ok(false); } let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; @@ -2194,7 +2224,7 @@ pub fn validate_blocks( blocks.len(), ); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); } let block_header = @@ -2214,7 +2244,7 @@ pub fn validate_blocks( block_header, ); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); } if request.fields.contains(BlockAttributes::HEADER) && @@ -2225,7 +2255,7 @@ pub fn validate_blocks( "Missing requested header for a block in response from {peer_id}.", ); - return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)) + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)); } if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) @@ -2235,7 +2265,7 @@ pub fn validate_blocks( "Missing requested body for a block in response from {peer_id}.", ); - return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)) + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)); } } @@ -2250,7 +2280,7 @@ pub fn validate_blocks( b.hash, hash, ); - return Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); } } if let (Some(header), Some(body)) = (&b.header, &b.body) { @@ -2268,7 +2298,7 @@ pub fn validate_blocks( expected, got, ); - return Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); } } } From 9079f36b99ee0910762ce6020ca9dfd6671f77b3 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:48:09 +0200 Subject: [PATCH 48/66] [WIP][ci] GHA improvements (#5653) Changes in PR: - enables `Check Cargo Check Runtimes` (it was accidentally disabled) - reorder jobs in test-misc to make it faster - add `quick-benchmarks-omni` to `check-frame-omni-bencher` (the workflow is currently disabled) --- .../workflows/check-cargo-check-runtimes.yml | 33 ++++++++----- .../workflows/check-frame-omni-bencher.yml | 48 ++++++++++++++++--- .github/workflows/tests-misc.yml | 11 +++-- 3 files changed, 68 insertions(+), 24 deletions(-) diff --git a/.github/workflows/check-cargo-check-runtimes.yml b/.github/workflows/check-cargo-check-runtimes.yml index 6325033d214c..ea0a4b943213 100644 --- a/.github/workflows/check-cargo-check-runtimes.yml +++ b/.github/workflows/check-cargo-check-runtimes.yml @@ -2,27 +2,34 @@ name: Check Cargo Check Runtimes on: pull_request: - types: [opened, synchronize, reopened, ready_for_review, labeled] + types: [opened, synchronize, reopened, ready_for_review] # Jobs in this workflow 
depend on each other, only for limiting peak amount of spawned workers jobs: - # GitHub Actions allows using 'env' in a container context. - # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 - # This workaround sets the container image for each job using 'set-image' job output. set-image: - if: contains(github.event.label.name, 'GHA-migration') || contains(github.event.pull_request.labels.*.name, 'GHA-migration') + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. runs-on: ubuntu-latest - timeout-minutes: 20 outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} + RUNNER: ${{ steps.set_runner.outputs.RUNNER }} steps: - name: Checkout uses: actions/checkout@v4 - id: set_image run: cat .github/env >> $GITHUB_OUTPUT + - id: set_runner + run: | + # Run merge queues on persistent runners + if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then + echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT + else + echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT + fi check-runtime-assets: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.set-image.outputs.RUNNER }} needs: [set-image] timeout-minutes: 20 container: @@ -36,7 +43,7 @@ jobs: root: cumulus/parachains/runtimes/assets check-runtime-collectives: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.set-image.outputs.RUNNER }} needs: [check-runtime-assets, set-image] timeout-minutes: 20 container: @@ -50,7 +57,7 @@ jobs: root: cumulus/parachains/runtimes/collectives check-runtime-coretime: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} needs: [check-runtime-assets, set-image] @@ -64,7 +71,7 @@ jobs: root: cumulus/parachains/runtimes/coretime check-runtime-bridge-hubs: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} needs: [set-image] @@ -78,7 +85,7 @@ jobs: root: cumulus/parachains/runtimes/bridge-hubs check-runtime-contracts: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} needs: [check-runtime-collectives, set-image] @@ -92,7 +99,7 @@ jobs: root: cumulus/parachains/runtimes/contracts check-runtime-starters: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} needs: [check-runtime-assets, set-image] @@ -106,7 +113,7 @@ jobs: root: cumulus/parachains/runtimes/starters check-runtime-testing: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} needs: [check-runtime-starters, set-image] diff --git a/.github/workflows/check-frame-omni-bencher.yml b/.github/workflows/check-frame-omni-bencher.yml index e9db2d912979..e035a30c7c22 100644 --- a/.github/workflows/check-frame-omni-bencher.yml +++ b/.github/workflows/check-frame-omni-bencher.yml @@ -5,7 +5,7 @@ on: branches: - master pull_request: - types: [ opened, synchronize, reopened, ready_for_review, labeled ] + types: [opened, synchronize, reopened, ready_for_review, labeled] merge_group: concurrency: @@ -28,19 +28,46 @@ jobs: 
# However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output. runs-on: ubuntu-latest - needs: changes - if: ${{ needs.changes.outputs.rust }} outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} + RUNNER: ${{ steps.set_runner.outputs.RUNNER }} steps: - name: Checkout uses: actions/checkout@v4 - id: set_image run: cat .github/env >> $GITHUB_OUTPUT + - id: set_runner + run: | + # Run merge queues on persistent runners + if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then + echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT + else + echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT + fi + + quick-benchmarks-omni: + runs-on: ${{ needs.set-image.outputs.RUNNER }} + needs: [set-image, changes] + if: ${{ needs.changes.outputs.rust }} + env: + RUSTFLAGS: "-C debug-assertions" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions" + timeout-minutes: 30 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: | + forklift cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks + forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet run-frame-omni-bencher: - runs-on: arc-runners-polkadot-sdk-beefy - needs: [ set-image, changes ] # , build-frame-omni-bencher ] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + needs: [set-image, changes] # , build-frame-omni-bencher ] if: ${{ needs.changes.outputs.rust }} timeout-minutes: 30 strategy: @@ -81,5 +108,14 @@ jobs: runs-on: ubuntu-latest name: All benchmarks passed needs: run-frame-omni-bencher + if: always() && !cancelled() steps: - - run: echo '### Good job! All the benchmarks passed 🚀' >> $GITHUB_STEP_SUMMARY + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! 
All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 97ad86e3998e..e4f0d6575c53 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -93,7 +93,7 @@ jobs: test-frame-ui: timeout-minutes: 60 - needs: [set-image, test-frame-examples-compile-to-wasm] + needs: [set-image] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -121,7 +121,7 @@ jobs: test-deterministic-wasm: timeout-minutes: 20 - needs: [set-image] + needs: [set-image, test-frame-examples-compile-to-wasm] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -148,7 +148,7 @@ jobs: timeout-minutes: 60 strategy: matrix: - branch: [ master, current ] + branch: [master, current] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -158,7 +158,7 @@ jobs: with: # if branch is master, use the branch, otherwise set empty string, so it uses the current context # either PR (including forks) or merge group (main repo) - ref: ${{ matrix.branch == 'master' && matrix.branch || '' }} + ref: ${{ matrix.branch == 'master' && matrix.branch || '' }} - name: script run: | @@ -181,7 +181,8 @@ jobs: node-bench-regression-guard: timeout-minutes: 20 if: always() && !cancelled() - runs-on: arc-runners-polkadot-sdk + # runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest needs: [set-image, cargo-check-benches] steps: - name: Checkout From 3779af2bb81cb9f022c1b9d3567613ede8049629 Mon Sep 17 00:00:00 2001 From: Ron Date: Tue, 10 Sep 2024 19:21:51 +0800 Subject: [PATCH 49/66] Update bridges/snowbridge/primitives/router/src/inbound/mod.rs Co-authored-by: Adrian Catangiu --- bridges/snowbridge/primitives/router/src/inbound/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index 6c107cfa07c2..10630fc2ad84 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -394,7 +394,7 @@ impl< asset_hub_fee: u128, ) -> Result<(Xcm<()>, Balance), ConvertMessageError> { let network = Ethereum { chain_id }; - let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee / 2).into(); + let asset_hub_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); let (dest_para_id, beneficiary, dest_para_fee) = match destination { // Final destination is a 32-byte account on AssetHub From 96c01dc5dc8750dc96458326b6170242e0909e73 Mon Sep 17 00:00:00 2001 From: Ron Date: Tue, 10 Sep 2024 19:22:26 +0800 Subject: [PATCH 50/66] Update bridges/snowbridge/pallets/system/src/tests.rs Co-authored-by: Adrian Catangiu --- bridges/snowbridge/pallets/system/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index d436724caba6..51cb377b24a7 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -688,7 +688,7 @@ fn register_all_tokens_succeeds() { }, // USDT TokenInfo { - location: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1084)]), + location: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1984)]), metadata: AssetMetadata { decimals: 6, name: 
b"USDT".to_vec().try_into().unwrap(), From a802127d9248f6e006b5597cb56cae0e100e0cdc Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 10 Sep 2024 20:57:41 +0800 Subject: [PATCH 51/66] Use full location of Ethereum --- bridges/snowbridge/pallets/system/src/lib.rs | 6 +++--- bridges/snowbridge/pallets/system/src/mock.rs | 2 +- .../bridge-hub-rococo/src/bridge_to_ethereum_config.rs | 4 ++-- .../bridge-hub-westend/src/bridge_to_ethereum_config.rs | 4 ++-- cumulus/parachains/runtimes/constants/src/rococo.rs | 3 ++- cumulus/parachains/runtimes/constants/src/westend.rs | 3 ++- 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 9712f589b808..8f3235279956 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -177,8 +177,8 @@ pub mod pallet { /// This chain's Universal Location. type UniversalLocation: Get; - // The bridges configured Ethereum network with chain id. - type EthereumNetwork: Get; + // The bridges configured Ethereum location + type EthereumLocation: Get; #[cfg(feature = "runtime-benchmarks")] type Helper: BenchmarkHelper; @@ -732,7 +732,7 @@ pub mod pallet { metadata: AssetMetadata, pays_fee: PaysFee, ) -> Result<(), DispatchError> { - let ethereum_location = Location::new(2, [GlobalConsensus(T::EthereumNetwork::get())]); + let ethereum_location = T::EthereumLocation::get(); // reanchor to Ethereum context let location = location .clone() diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index f70641288250..47b089866a53 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -210,7 +210,7 @@ impl crate::Config for Test { type WeightInfo = (); type InboundDeliveryCost = InboundDeliveryCost; type UniversalLocation = UniversalLocation; - type EthereumNetwork = EthereumNetwork; + type EthereumLocation = EthereumDestination; #[cfg(feature = "runtime-benchmarks")] type Helper = (); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs index fde214ed42d6..d9a3869bd6e1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_ethereum_config.rs @@ -29,7 +29,7 @@ use sp_core::H160; use testnet_parachains_constants::rococo::{ currency::*, fee::WeightToFee, - snowbridge::{EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, + snowbridge::{EthereumLocation, EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, }; use crate::xcm_config::RelayNetwork; @@ -190,7 +190,7 @@ impl snowbridge_pallet_system::Config for Runtime { type DefaultPricingParameters = Parameters; type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; - type EthereumNetwork = EthereumNetwork; + type EthereumLocation = EthereumLocation; } #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index 4b3f61a42ad4..1bd425ab4075 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -30,7 +30,7 @@ use sp_core::H160; use testnet_parachains_constants::westend::{ currency::*, fee::WeightToFee, - snowbridge::{EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, + snowbridge::{EthereumLocation, EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX}, }; use crate::xcm_config::RelayNetwork; @@ -189,7 +189,7 @@ impl snowbridge_pallet_system::Config for Runtime { type DefaultPricingParameters = Parameters; type InboundDeliveryCost = EthereumInboundQueue; type UniversalLocation = UniversalLocation; - type EthereumNetwork = EthereumNetwork; + type EthereumLocation = EthereumLocation; } #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/constants/src/rococo.rs b/cumulus/parachains/runtimes/constants/src/rococo.rs index 56f4868371c1..6c5380df2bfb 100644 --- a/cumulus/parachains/runtimes/constants/src/rococo.rs +++ b/cumulus/parachains/runtimes/constants/src/rococo.rs @@ -148,7 +148,7 @@ pub mod time { pub mod snowbridge { use frame_support::parameter_types; - use xcm::opaque::lts::NetworkId; + use xcm::prelude::{Location, NetworkId}; /// The pallet index of the Ethereum inbound queue pallet in the bridge hub runtime. pub const INBOUND_QUEUE_PALLET_INDEX: u8 = 80; @@ -159,6 +159,7 @@ pub mod snowbridge { /// /// pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub EthereumLocation: Location = Location::new(2,EthereumNetwork::get()); } } diff --git a/cumulus/parachains/runtimes/constants/src/westend.rs b/cumulus/parachains/runtimes/constants/src/westend.rs index fec66cec2eb6..a4667f83fbef 100644 --- a/cumulus/parachains/runtimes/constants/src/westend.rs +++ b/cumulus/parachains/runtimes/constants/src/westend.rs @@ -171,7 +171,7 @@ pub mod time { pub mod snowbridge { use frame_support::parameter_types; - use xcm::opaque::lts::NetworkId; + use xcm::prelude::{Location, NetworkId}; /// The pallet index of the Ethereum inbound queue pallet in the bridge hub runtime. pub const INBOUND_QUEUE_PALLET_INDEX: u8 = 80; @@ -182,5 +182,6 @@ pub mod snowbridge { /// /// pub EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub EthereumLocation: Location = Location::new(2,EthereumNetwork::get()); } } From baf5b27b37c0c26420d7ecd55ef2c3878da66bb4 Mon Sep 17 00:00:00 2001 From: ron Date: Tue, 10 Sep 2024 20:57:49 +0800 Subject: [PATCH 52/66] Fix test --- bridges/snowbridge/pallets/system/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 51cb377b24a7..b3699ec2f24d 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -695,7 +695,7 @@ fn register_all_tokens_succeeds() { symbol: b"USDT".to_vec().try_into().unwrap(), }, foreign_token_id: hex!( - "d49fe2118be0cca618e4d171e60ffea98b7b648dd80dc37d6342116b910b7aa5" + "14b0579be12d7d7f9971f1d4b41f0e88384b9b74799b0150d4aa6cd01afb4444" ) .into(), }, From d887804b235003282b26d4202f40c79fb302f16c Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 10 Sep 2024 15:28:28 +0200 Subject: [PATCH 53/66] [Bot] Use correct token in backport bot (#5654) The backport bot currently does not trigger CI when opening an MR, like here: https://github.com/paritytech/polkadot-sdk/pull/5651. Devs need to push an empty commit manually. The bot now uses a token that will also trigger the CI.
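For context on the diff below: GitHub intentionally does not start new workflow runs for events created with the default workflow `GITHUB_TOKEN`, so a PR opened by the bot under that token receives no CI. The usual workaround is to mint a short-lived GitHub App installation token and hand it to the action that opens the PR. A sketch of that pattern, with placeholder secret names (`APP_ID`/`APP_KEY` are illustrative; the actual workflow uses the repo's bot-app secrets as shown in the diff):

steps:
  - name: Generate token
    id: generate_token
    uses: tibdex/github-app-token@v2.1.0
    with:
      app_id: ${{ secrets.APP_ID }}        # placeholder; any GitHub App installed on the repo
      private_key: ${{ secrets.APP_KEY }}  # placeholder
  - name: Create backport pull requests
    uses: korthout/backport-action@v3
    with:
      github_token: ${{ steps.generate_token.outputs.token }}  # app token, so the opened PR triggers CI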
--------- Signed-off-by: Oliver Tale-Yazdi --- .github/workflows/command-backport.yml | 32 ++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml index 1ad68d96a63f..85e7b77801d7 100644 --- a/.github/workflows/command-backport.yml +++ b/.github/workflows/command-backport.yml @@ -27,15 +27,24 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Generate token + id: generate_token + uses: tibdex/github-app-token@v2.1.0 + with: + app_id: ${{ secrets.CMD_BOT_APP_ID }} + private_key: ${{ secrets.CMD_BOT_APP_KEY }} + - name: Create backport pull requests uses: korthout/backport-action@v3 id: backport with: target_branches: stable2407 stable2409 merge_commits: skip - github_token: ${{ secrets.GITHUB_TOKEN }} + github_token: ${{ steps.generate_token.outputs.token }} pull_description: | - Backport #${pull_number} into `${target_branch}` (cc @${pull_author}). + Backport #${pull_number} into `${target_branch}` from ${pull_author}. + + See the [documentation](https://github.com/paritytech/polkadot-sdk/blob/master/docs/BACKPORT.md) on how to use this bot.