diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bfdb5bb3d092a..fa986923708d3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -269,16 +269,6 @@ check-signed-tag: script: - ./.maintain/gitlab/check_signed.sh -check-line-width: - stage: check - image: paritytech/tools:latest - <<: *kubernetes-env - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - ./.maintain/gitlab/check_line_width.sh - allow_failure: true - test-dependency-rules: stage: check image: paritytech/tools:latest @@ -516,7 +506,7 @@ test-wasmer-sandbox: variables: <<: *default-vars script: - - time cargo test --release --features runtime-benchmarks,wasmer-sandbox + - time cargo test --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests - sccache -s cargo-check-macos: @@ -618,6 +608,7 @@ build-rustdoc: variables: <<: *default-vars SKIP_WASM_BUILD: 1 + DOC_INDEX_PAGE: "sc_service/index.html" # default redirected page artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" when: on_success @@ -632,7 +623,7 @@ build-rustdoc: - mv ./target/doc ./crate-docs # FIXME: remove me after CI image gets nonroot - chown -R nonroot:nonroot ./crate-docs - - echo "" > ./crate-docs/index.html + - echo "" > ./crate-docs/index.html - sccache -s #### stage: publish @@ -662,8 +653,8 @@ build-rustdoc: --tag "$IMAGE_NAME:$VERSION" --tag "$IMAGE_NAME:latest" --file "$DOCKERFILE" . 
- - echo "$DOCKER_HUB_USER" | - buildah login --username "$DOCKER_HUB_PASS" --password-stdin docker.io + - echo "$DOCKER_HUB_PASS" | + buildah login --username "$DOCKER_HUB_USER" --password-stdin docker.io - buildah info - buildah push --format=v2s2 "$IMAGE_NAME:$VERSION" - buildah push --format=v2s2 "$IMAGE_NAME:latest" @@ -728,42 +719,75 @@ publish-rustdoc: stage: publish <<: *kubernetes-env <<: *vault-secrets - image: paritytech/tools:latest + image: node:16 variables: GIT_DEPTH: 100 + # --- Following variables are for rustdocs deployment --- + # Space separated values of branches and tags to generate rustdocs + RUSTDOCS_DEPLOY_REFS: "master monthly-2021-09+1 monthly-2021-08 v3.0.0" + # Location of the docs index template + INDEX_TPL: ".maintain/docs-index-tpl.ejs" + # Where the `/latest` symbolic link links to. One of the $RUSTDOCS_DEPLOY_REFS value. + LATEST: "monthly-2021-09+1" rules: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^monthly-20[0-9]{2}-[0-9]{2}.*$/ # to support: monthly-2021-09+1 + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 # `needs:` can be removed after CI image gets nonroot. In this case `needs:` stops other # artifacts from being dowloaded by this job. needs: - job: build-rustdoc artifacts: true script: + # If $CI_COMMIT_REF_NAME doesn't match one of $RUSTDOCS_DEPLOY_REFS space-separated values, we + # exit immediately. + # Putting spaces at the front and back to ensure we are not matching just any substring, but the + # whole space-separated value. 
+ - '[[ " ${RUSTDOCS_DEPLOY_REFS} " =~ " ${CI_COMMIT_REF_NAME} " ]] || exit 0' - rm -rf /tmp/* # Set git config - rm -rf .git/config - git config user.email "devops-team@parity.io" - git config user.name "${GITHUB_USER}" - - git config remote.origin.url "https://${GITHUB_TOKEN}@github.com/paritytech/substrate.git" + - git config remote.origin.url "https://${GITHUB_TOKEN}@github.com/paritytech/${CI_PROJECT_NAME}.git" - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" - git fetch origin gh-pages + # Install `ejs` and generate index.html based on RUSTDOCS_DEPLOY_REFS + - yarn global add ejs + - 'ejs ${INDEX_TPL} -i "{\"deploy_refs\":\"${RUSTDOCS_DEPLOY_REFS}\",\"repo_name\":\"${CI_PROJECT_NAME}\",\"latest\":\"${LATEST}\"}" > /tmp/index.html' # Save README and docs - cp -r ./crate-docs/ /tmp/doc/ - cp README.md /tmp/doc/ - git checkout gh-pages - # Remove everything and restore generated docs and README - - rm -rf ./* - - mv /tmp/doc/* . + # Remove directories no longer necessary, as specified in $RUSTDOCS_DEPLOY_REFS. + # Also ensure $RUSTDOCS_DEPLOY_REFS is not just empty spaces. + # Even though this block spans multiple lines, they are concatenated to run as a single line + # command, so note for the semi-colons in the inner-most code block. + - if [[ ! -z ${RUSTDOCS_DEPLOY_REFS// } ]]; then + for FILE in *; do + if [[ ! " $RUSTDOCS_DEPLOY_REFS " =~ " $FILE " ]]; then + echo "Removing ${FILE}..."; + rm -rf $FILE; + fi + done + fi + # Move the index page & built back + - mv -f /tmp/index.html . + # Ensure the destination dir doesn't exist. + - rm -rf ${CI_COMMIT_REF_NAME} + - mv -f /tmp/doc ${CI_COMMIT_REF_NAME} + # Add the symlink + - '[[ -e "$LATEST" ]] && ln -sf "${LATEST}" latest' # Upload files - git add --all --force # `git commit` has an exit code of > 0 if there is nothing to commit. # This causes GitLab to exit immediately and marks this job failed. 
# We don't want to mark the entire job failed if there's nothing to # publish though, hence the `|| true`. - - git commit -m "Updated docs for ${CI_COMMIT_REF_NAME}" || + - git commit -m "___Updated docs for ${CI_COMMIT_REF_NAME}___" || echo "___Nothing to commit___" - git push origin gh-pages --force after_script: diff --git a/.maintain/docs-index-tpl.ejs b/.maintain/docs-index-tpl.ejs new file mode 100644 index 0000000000000..81c619a926b2b --- /dev/null +++ b/.maintain/docs-index-tpl.ejs @@ -0,0 +1,55 @@ +<% + const capFirst = s => (s && s[0].toUpperCase() + s.slice(1)) || ""; +%> + + + + + + + + <%= capFirst(repo_name) %> Rustdocs + + + + + + +
+
+

<%= capFirst(repo_name) %> Rustdocs

+
+
    + <%_ deploy_refs.split(/\s+/).forEach(ref => { _%> +
  • + <%- ref -%> + <%_ if (latest && latest.trim() !== '' && latest === ref) { _%> + (latest) + <%_ } _%> +
  • + <%_ }) _%> +
+
+
+
+ + diff --git a/.maintain/gitlab/check_line_width.sh b/.maintain/gitlab/check_line_width.sh deleted file mode 100755 index ebab3013e4b48..0000000000000 --- a/.maintain/gitlab/check_line_width.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh -# -# check if line width of rust source files is not beyond x characters -# -set -e -set -o pipefail - -BASE_ORIGIN="origin" -BASE_BRANCH_NAME="master" -LINE_WIDTH="120" -GOOD_LINE_WIDTH="100" -BASE_BRANCH="${BASE_ORIGIN}/${BASE_BRANCH_NAME}" -git fetch ${BASE_ORIGIN} ${BASE_BRANCH_NAME} --depth 100 -BASE_HASH=$(git merge-base ${BASE_BRANCH} HEAD) - -git diff --name-only ${BASE_HASH} -- \*.rs | ( while read file -do - if [ ! -f ${file} ]; - then - echo "Skipping removed file." - elif git diff ${BASE_HASH} -- ${file} | grep -q "^+.\{$(( $LINE_WIDTH + 1 ))\}" - then - if [ -z "${FAIL}" ] - then - echo "| error!" - echo "| Lines must not be longer than ${LINE_WIDTH} characters." - echo "| " - echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" - echo "|" - FAIL="true" - fi - echo "| file: ${file}" - git diff ${BASE_HASH} -- ${file} \ - | grep -n "^+.\{$(( $LINE_WIDTH + 1))\}" - echo "|" - else - if git diff ${BASE_HASH} -- ${file} | grep -q "^+.\{$(( $GOOD_LINE_WIDTH + 1 ))\}" - then - if [ -z "${FAIL}" ] - then - echo "| warning!" - echo "| Lines should be longer than ${GOOD_LINE_WIDTH} characters only in exceptional circumstances!" 
- echo "| " - echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" - echo "|" - fi - echo "| file: ${file}" - git diff ${BASE_HASH} -- ${file} | grep -n "^+.\{$(( $GOOD_LINE_WIDTH + 1 ))\}" - echo "|" - fi - fi -done - -test -z "${FAIL}" -) diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index 7a69cba66c3f3..2711610024330 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -133,16 +133,6 @@ groups: # Others ############################################################################## - - alert: ContinuousTaskEnded - expr: '(polkadot_tasks_spawned_total{task_name != "basic-authorship-proposer", task_name != "substrate-rpc-subscription"} == 1) - - on(instance, task_name) group_left() (polkadot_tasks_ended_total == 1)' - for: 5m - labels: - severity: warning - annotations: - message: 'Continuous task {{ $labels.task_name }} on node - {{ $labels.instance }} ended unexpectedly.' - - alert: AuthorityDiscoveryDiscoveryFailureHigh expr: 'polkadot_authority_discovery_handle_value_found_event_failure / ignoring(name) diff --git a/.maintain/node-template-release/Cargo.lock b/.maintain/node-template-release/Cargo.lock new file mode 100644 index 0000000000000..ee562498c811c --- /dev/null +++ b/.maintain/node-template-release/Cargo.lock @@ -0,0 +1,616 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "cc" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "2.33.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "crc32fast" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" 
+dependencies = [ + "cfg-if", +] + +[[package]] +name = "curl-sys" +version = "0.4.48+curl-7.79.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a77a741f832116da66aeb126b4f19190ecf46144a74a9bde43c2086f38da0e" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", + "winapi", +] + +[[package]] +name = "filetime" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "winapi", +] + +[[package]] +name = "flate2" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +dependencies = [ + "cfg-if", + "crc32fast", + "libc", + "miniz_oxide", +] + +[[package]] +name = "fs_extra" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" + +[[package]] +name = "getrandom" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "git2" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7339329bfa14a00223244311560d11f8f489b453fb90092af97f267a6090ab0" +dependencies = [ + "bitflags", + "libc", + "libgit2-sys", + "log", + "openssl-probe", + "openssl-sys", + "url", +] + +[[package]] +name = "glob" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "idna" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" + +[[package]] +name = "libgit2-sys" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48441cb35dc255da8ae72825689a95368bf510659ae1ad55dc4aa88cb1789bf1" +dependencies = [ + "cc", + "curl-sys", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", +] + +[[package]] +name = "libssh2-sys" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0186af0d8f171ae6b9c4c90ec51898bad5d08a2d5e470903a50d9ad8959cbee" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "log" +version = "0.4.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "matches" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" + +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + "autocfg", +] + +[[package]] +name = "node-template-release" +version = "3.0.0" +dependencies = [ + "flate2", + "fs_extra", + "git2", + "glob", + "structopt", + "tar", + "tempfile", + "toml", +] + +[[package]] +name = "openssl-probe" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" + +[[package]] +name = "openssl-sys" +version = "0.9.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "percent-encoding" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" + +[[package]] +name = "pkg-config" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb" + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +dependencies = [ + "rand_core", +] + +[[package]] 
+name = "redox_syscall" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +dependencies = [ + "bitflags", +] + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "serde" +version = "1.0.130" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "structopt" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" +dependencies = [ + "clap", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "1.0.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5239bc68e0fef57495900cfea4e8dc75596d9a319d7e16b1e0a440d24e6fe0a0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "tar" +version = "0.4.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6f5515d3add52e0bbdcad7b83c388bb36ba7b754dda3b5f5bc2d38640cdba5c" +dependencies = [ + "filetime", + "libc", + "xattr", +] + +[[package]] +name = "tempfile" +version = "3.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "tinyvec" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "toml" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +dependencies = [ + "serde", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" + +[[package]] +name = "unicode-normalization" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" + +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "url" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +dependencies = [ + "idna", + "matches", + "percent-encoding", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "xattr" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c" +dependencies = [ + "libc", +] diff --git a/Cargo.lock b/Cargo.lock index 7754e0ae6b62f..0e3f3399dbf6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,20 +14,11 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli 0.23.0", -] - -[[package]] -name = "addr2line" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03345e98af8f3d786b6d9f656ccfa6ac316d954e92bc4841f0bba20789d5fb5a" +checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" dependencies = [ - "gimli 0.24.0", + "gimli 0.25.0", ] [[package]] @@ -291,9 +282,9 @@ dependencies = [ [[package]] name = "async-std" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" +checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" dependencies = [ "async-attributes", "async-channel", @@ -415,15 +406,16 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.56" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" dependencies = [ - "addr2line 0.14.1", + "addr2line", + "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.23.0", + "object 0.26.0", "rustc-demangle", ] @@ -492,6 +484,80 
@@ dependencies = [ "serde", ] +[[package]] +name = "beefy-gadget" +version = "4.0.0-dev" +dependencies = [ + "beefy-primitives", + "fnv", + "futures 0.3.16", + "log 0.4.14", + "parity-scale-codec", + "parking_lot 0.11.1", + "sc-client-api", + "sc-keystore", + "sc-network", + "sc-network-gossip", + "sc-network-test", + "sc-utils", + "sp-api", + "sp-application-crypto", + "sp-arithmetic", + "sp-blockchain", + "sp-core", + "sp-keystore", + "sp-runtime", + "strum 0.21.0", + "substrate-prometheus-endpoint", + "thiserror", + "wasm-timer", +] + +[[package]] +name = "beefy-gadget-rpc" +version = "4.0.0-dev" +dependencies = [ + "beefy-gadget", + "beefy-primitives", + "futures 0.3.16", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-pubsub", + "log 0.4.14", + "parity-scale-codec", + "sc-rpc", + "serde", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "beefy-merkle-tree" +version = "4.0.0-dev" +dependencies = [ + "env_logger 0.9.0", + "hex", + "hex-literal", + "log 0.4.14", + "tiny-keccak", +] + +[[package]] +name = "beefy-primitives" +version = "4.0.0-dev" +dependencies = [ + "hex-literal", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-keystore", + "sp-runtime", + "sp-std", +] + [[package]] name = "bincode" version = "1.3.2" @@ -993,11 +1059,11 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.74.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ca3560686e7c9c7ed7e0fe77469f2410ba5d7781b1acaa9adc8d8deea28e3e" +checksum = "7e6bea67967505247f54fa2c85cf4f6e0e31c4e5692c9b70e4ae58e339067333" dependencies = [ - "cranelift-entity 0.74.0", + "cranelift-entity 0.76.0", ] [[package]] @@ -1014,26 +1080,26 @@ dependencies = [ "gimli 0.22.0", "log 0.4.14", "regalloc", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.11.2", "thiserror", ] [[package]] name = "cranelift-codegen" -version = "0.74.0" +version = "0.76.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf9bf1ffffb6ce3d2e5ebc83549bd2436426c99b31cc550d521364cbe35d276" +checksum = "48194035d2752bdd5bdae429e3ab88676e95f52a2b1355a5d4e809f9e39b1d74" dependencies = [ - "cranelift-bforest 0.74.0", - "cranelift-codegen-meta 0.74.0", - "cranelift-codegen-shared 0.74.0", - "cranelift-entity 0.74.0", - "gimli 0.24.0", + "cranelift-bforest 0.76.0", + "cranelift-codegen-meta 0.76.0", + "cranelift-codegen-shared 0.76.0", + "cranelift-entity 0.76.0", + "gimli 0.25.0", "log 0.4.14", "regalloc", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.12.0", ] @@ -1049,12 +1115,12 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.74.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc21936a5a6d07e23849ffe83e5c1f6f50305c074f4b2970ca50c13bf55b821" +checksum = "976efb22fcab4f2cd6bd4e9913764616a54d895c1a23530128d04e03633c555f" dependencies = [ - "cranelift-codegen-shared 0.74.0", - "cranelift-entity 0.74.0", + "cranelift-codegen-shared 0.76.0", + "cranelift-entity 0.76.0", ] [[package]] @@ -1065,9 +1131,9 @@ checksum = "6759012d6d19c4caec95793f052613e9d4113e925e7f14154defbac0f1d4c938" [[package]] name = "cranelift-codegen-shared" -version = "0.74.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5b6ffaa87560bebe69a5446449da18090b126037920b0c1c6d5945f72faf6b" +checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" dependencies = [ "serde", ] @@ -1083,9 +1149,9 @@ dependencies = [ [[package]] name = "cranelift-entity" -version = "0.74.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6b4a8bef04f82e4296782646f733c641d09497df2fabf791323fefaa44c64c" +checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" dependencies = [ "serde", ] @@ -1098,47 +1164,48 @@ checksum = 
"b608bb7656c554d0a4cf8f50c7a10b857e80306f6ff829ad6d468a7e2323c8d8" dependencies = [ "cranelift-codegen 0.68.0", "log 0.4.14", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.11.2", ] [[package]] name = "cranelift-frontend" -version = "0.74.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b783b351f966fce33e3c03498cb116d16d97a8f9978164a60920bd0d3a99c" +checksum = "279afcc0d3e651b773f94837c3d581177b348c8d69e928104b2e9fccb226f921" dependencies = [ - "cranelift-codegen 0.74.0", + "cranelift-codegen 0.76.0", "log 0.4.14", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.12.0", ] [[package]] name = "cranelift-native" -version = "0.74.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c88d3dd48021ff1e37e978a00098524abd3513444ae252c08d37b310b3d2a" +checksum = "4c04d1fe6a5abb5bb0edc78baa8ef238370fb8e389cc88b6d153f7c3e9680425" dependencies = [ - "cranelift-codegen 0.74.0", + "cranelift-codegen 0.76.0", + "libc", "target-lexicon 0.12.0", ] [[package]] name = "cranelift-wasm" -version = "0.74.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb6d408e2da77cdbbd65466298d44c86ae71c1785d2ab0d8657753cdb4d9d89" +checksum = "e0d260ad44f6fd2c91f7f5097191a2a9e3edcbb36df1fb787b600dad5ea148ec" dependencies = [ - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", - "cranelift-frontend 0.74.0", + "cranelift-codegen 0.76.0", + "cranelift-entity 0.76.0", + "cranelift-frontend 0.76.0", "itertools 0.10.0", "log 0.4.14", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "thiserror", - "wasmparser 0.78.2", + "wasmparser 0.79.0", ] [[package]] @@ -1985,7 +2052,7 @@ dependencies = [ "pretty_assertions 0.6.1", "scale-info", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-arithmetic", "sp-core", "sp-inherents", @@ -2314,6 +2381,21 @@ version = "0.3.55" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" +[[package]] +name = "generate-bags" +version = "3.0.0" +dependencies = [ + "chrono", + "frame-election-provider-support", + "frame-support", + "frame-system", + "git2", + "num-format", + "pallet-staking", + "sp-io", + "structopt", +] + [[package]] name = "generic-array" version = "0.12.4" @@ -2380,21 +2462,28 @@ dependencies = [ [[package]] name = "gimli" -version = "0.23.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - -[[package]] -name = "gimli" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" dependencies = [ "fallible-iterator", "indexmap", "stable_deref_trait", ] +[[package]] +name = "git2" +version = "0.13.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "659cd14835e75b64d9dba5b660463506763cf0aa6cb640aeeb0e98d841093490" +dependencies = [ + "bitflags", + "libc", + "libgit2-sys", + "log 0.4.14", + "url 2.2.1", +] + [[package]] name = "glob" version = "0.3.0" @@ -3141,7 +3230,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45a3f58dc069ec0e205a27f5b45920722a46faed802a0541538241af6228f512" dependencies = [ "parity-util-mem", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -3170,7 +3259,7 @@ dependencies = [ "parking_lot 0.11.1", "regex", "rocksdb", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -3203,6 +3292,18 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" +[[package]] +name = "libgit2-sys" +version = "0.12.22+1.1.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89c53ac117c44f7042ad8d8f5681378dfbc6010e49ec2c0d1f11dfedc7a4a1c3" +dependencies = [ + "cc", + "libc", + "libz-sys", + "pkg-config", +] + [[package]] name = "libloading" version = "0.5.2" @@ -3274,7 +3375,7 @@ dependencies = [ "multiaddr", "parking_lot 0.11.1", "pin-project 1.0.5", - "smallvec 1.6.1", + "smallvec 1.7.0", "wasm-timer", ] @@ -3305,7 +3406,7 @@ dependencies = [ "ring", "rw-stream-sink", "sha2 0.9.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "thiserror", "unsigned-varint 0.7.0", "void", @@ -3333,7 +3434,7 @@ dependencies = [ "futures 0.3.16", "libp2p-core", "log 0.4.14", - "smallvec 1.6.1", + "smallvec 1.7.0", "trust-dns-resolver", ] @@ -3352,7 +3453,7 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -3376,7 +3477,7 @@ dependencies = [ "rand 0.7.3", "regex", "sha2 0.9.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", "wasm-timer", ] @@ -3393,7 +3494,7 @@ dependencies = [ "log 0.4.14", "prost", "prost-build", - "smallvec 1.6.1", + "smallvec 1.7.0", "wasm-timer", ] @@ -3416,7 +3517,7 @@ dependencies = [ "prost-build", "rand 0.7.3", "sha2 0.9.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "uint", "unsigned-varint 0.7.0", "void", @@ -3439,7 +3540,7 @@ dependencies = [ "libp2p-swarm", "log 0.4.14", "rand 0.8.4", - "smallvec 1.6.1", + "smallvec 1.7.0", "socket2 0.4.0", "void", ] @@ -3458,7 +3559,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.11.1", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", ] @@ -3547,7 +3648,7 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", "void", "wasm-timer", @@ -3568,7 +3669,7 @@ dependencies = [ "lru", "minicbor", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", "wasm-timer", ] @@ -3584,7 +3685,7 @@ dependencies = [ "libp2p-core", 
"log 0.4.14", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "void", "wasm-timer", ] @@ -3711,9 +3812,9 @@ dependencies = [ "base64 0.12.3", "digest 0.9.0", "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", + "libsecp256k1-core 0.2.2", + "libsecp256k1-gen-ecmult 0.2.1", + "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", "sha2 0.9.3", @@ -3730,15 +3831,32 @@ dependencies = [ "base64 0.12.3", "digest 0.9.0", "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", + "libsecp256k1-core 0.2.2", + "libsecp256k1-gen-ecmult 0.2.1", + "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", "sha2 0.9.3", "typenum", ] +[[package]] +name = "libsecp256k1" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +dependencies = [ + "arrayref", + "base64 0.13.0", + "digest 0.9.0", + "libsecp256k1-core 0.3.0", + "libsecp256k1-gen-ecmult 0.3.0", + "libsecp256k1-gen-genmult 0.3.0", + "rand 0.8.4", + "serde", + "sha2 0.9.3", +] + [[package]] name = "libsecp256k1-core" version = "0.2.2" @@ -3750,13 +3868,33 @@ dependencies = [ "subtle 2.4.0", ] +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle 2.4.0", +] + [[package]] name = "libsecp256k1-gen-ecmult" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" dependencies = [ - "libsecp256k1-core", + "libsecp256k1-core 0.2.2", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core 0.3.0", ] [[package]] @@ -3765,7 +3903,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" dependencies = [ - "libsecp256k1-core", + "libsecp256k1-core 0.2.2", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core 0.3.0", ] [[package]] @@ -3775,6 +3922,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ "cc", + "libc", "pkg-config", "vcpkg", ] @@ -3950,9 +4098,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "memmap" @@ -4211,7 +4359,7 @@ dependencies = [ "futures 0.3.16", "log 0.4.14", "pin-project 1.0.5", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", ] @@ -4351,6 +4499,7 @@ dependencies = [ "frame-system", "futures 0.3.16", "hex-literal", + "jsonrpsee-ws-client", "log 0.4.14", "nix", "node-executor", @@ -4364,6 +4513,7 @@ dependencies = [ "platforms", "rand 0.7.3", "regex", + "remote-externalities", "sc-authority-discovery", "sc-basic-authorship", "sc-chain-spec", @@ -4408,7 +4558,9 @@ dependencies = [ "substrate-build-script-utils", "substrate-frame-cli", "tempfile", + "tokio", "try-runtime-cli", + "wait-timeout", ] [[package]] @@ -4530,6 +4682,7 @@ dependencies = [ 
"pallet-authority-discovery", "pallet-authorship", "pallet-babe", + "pallet-bags-list", "pallet-balances", "pallet-bounties", "pallet-collective", @@ -4591,6 +4744,15 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "node-runtime-generate-bags" +version = "3.0.0" +dependencies = [ + "generate-bags", + "node-runtime", + "structopt", +] + [[package]] name = "node-template" version = "3.0.0" @@ -4711,9 +4873,9 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "6.2.1" +version = "6.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c5c51b9083a3c620fa67a2a635d1ce7d95b897e957d6b28ff9a5da960a103a6" +checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" dependencies = [ "bitvec 0.19.5", "funty", @@ -4750,6 +4912,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-format" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" +dependencies = [ + "arrayvec 0.4.12", + "itoa", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -4815,18 +4987,13 @@ dependencies = [ [[package]] name = "object" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" - -[[package]] -name = "object" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170" +checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386" dependencies = [ "crc32fast", "indexmap", + "memchr", ] [[package]] @@ -5017,6 +5184,25 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-bags-list" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + 
"frame-support", + "frame-system", + "log 0.4.14", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-tracing", +] + [[package]] name = "pallet-balances" version = "4.0.0-dev" @@ -5034,6 +5220,50 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-beefy" +version = "4.0.0-dev" +dependencies = [ + "beefy-primitives", + "frame-support", + "frame-system", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + +[[package]] +name = "pallet-beefy-mmr" +version = "4.0.0-dev" +dependencies = [ + "beefy-merkle-tree", + "beefy-primitives", + "frame-support", + "frame-system", + "hex", + "hex-literal", + "libsecp256k1 0.7.0", + "log 0.4.14", + "pallet-beefy", + "pallet-mmr", + "pallet-mmr-primitives", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-bounties" version = "4.0.0-dev" @@ -5041,6 +5271,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log 0.4.14", "pallet-balances", "pallet-treasury", "parity-scale-codec", @@ -5092,7 +5323,7 @@ dependencies = [ "rand_pcg 0.2.1", "scale-info", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-core", "sp-io", "sp-runtime", @@ -5707,6 +5938,7 @@ dependencies = [ "frame-system", "log 0.4.14", "pallet-authorship", + "pallet-bags-list", "pallet-balances", "pallet-session", "pallet-staking-reward-curve", @@ -5718,6 +5950,7 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-io", + "sp-npos-elections", "sp-runtime", "sp-staking", "sp-std", @@ -5821,7 +6054,7 @@ dependencies = [ "scale-info", "serde", "serde_json", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-core", "sp-io", "sp-runtime", @@ -6018,7 +6251,7 @@ dependencies = [ "parity-util-mem-derive", "parking_lot 0.11.1", "primitive-types", - "smallvec 1.6.1", + 
"smallvec 1.7.0", "winapi 0.3.9", ] @@ -6129,7 +6362,7 @@ dependencies = [ "cloudabi", "libc", "redox_syscall 0.1.57", - "smallvec 1.6.1", + "smallvec 1.7.0", "winapi 0.3.9", ] @@ -6143,7 +6376,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.5", - "smallvec 1.6.1", + "smallvec 1.7.0", "winapi 0.3.9", ] @@ -6996,7 +7229,7 @@ dependencies = [ "log 0.4.14", "rustc-hash", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -7769,7 +8002,6 @@ dependencies = [ "sc-allocator", "sc-executor-common", "sc-runtime-test", - "scoped-tls", "sp-core", "sp-io", "sp-runtime-interface", @@ -7934,7 +8166,7 @@ dependencies = [ "sc-utils", "serde", "serde_json", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -8459,6 +8691,26 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scroll" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sct" version = "0.6.0" @@ -8546,9 +8798,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" dependencies = [ "serde_derive", ] @@ -8574,9 +8826,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.130" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -8740,9 +8992,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "snap" @@ -9180,7 +9432,6 @@ dependencies = [ "sp-core", "sp-externalities", "sp-keystore", - "sp-maybe-compressed-blob", "sp-runtime-interface", "sp-state-machine", "sp-std", @@ -9451,7 +9702,7 @@ dependencies = [ "parking_lot 0.11.1", "pretty_assertions 0.6.1", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-core", "sp-externalities", "sp-panic-handler", @@ -9696,6 +9947,9 @@ name = "strum" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" +dependencies = [ + "strum_macros 0.21.1", +] [[package]] name = "strum_macros" @@ -10229,9 +10483,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cf844b23c6131f624accf65ce0e4e9956a8bb329400ea5bcc26ae3a5c20b0b" +checksum = "c2c2416fdedca8443ae44b4527de1ea633af61d8f7169ffa6e72c5b53d24efcc" dependencies = [ "autocfg 1.0.1", "bytes 1.0.1", @@ -10483,7 +10737,7 @@ dependencies = [ "serde", "serde_json", "sharded-slab", - "smallvec 1.6.1", + "smallvec 1.7.0", "thread_local", "tracing", "tracing-core", @@ -10529,7 +10783,7 @@ dependencies = [ "hashbrown 0.11.2", "log 0.4.14", "rustc-hex", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] 
@@ -10569,7 +10823,7 @@ dependencies = [ "lazy_static", "log 0.4.14", "rand 0.8.4", - "smallvec 1.6.1", + "smallvec 1.7.0", "thiserror", "tinyvec", "url 2.2.1", @@ -10589,7 +10843,7 @@ dependencies = [ "lru-cache", "parking_lot 0.11.1", "resolv-conf", - "smallvec 1.6.1", + "smallvec 1.7.0", "thiserror", "trust-dns-proto", ] @@ -10604,6 +10858,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" name = "try-runtime-cli" version = "0.10.0-dev" dependencies = [ + "jsonrpsee-ws-client", "log 0.4.14", "parity-scale-codec", "remote-externalities", @@ -10613,9 +10868,12 @@ dependencies = [ "sc-service", "serde", "sp-core", + "sp-externalities", + "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", + "sp-version", "structopt", ] @@ -11015,7 +11273,7 @@ dependencies = [ "enumset", "serde", "serde_bytes", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.11.2", "thiserror", "wasmer-types", @@ -11035,7 +11293,7 @@ dependencies = [ "more-asserts", "rayon", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "tracing", "wasmer-compiler", "wasmer-types", @@ -11055,7 +11313,7 @@ dependencies = [ "more-asserts", "rayon", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "wasmer-compiler", "wasmer-types", "wasmer-vm", @@ -11210,15 +11468,15 @@ checksum = "87cc2fe6350834b4e528ba0901e7aa405d78b89dc1fa3145359eb4de0e323fcf" [[package]] name = "wasmparser" -version = "0.78.2" +version = "0.79.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" +checksum = "5b5894be15a559c85779254700e1d35f02f843b5a69152e5c82c626d9fd66c0e" [[package]] name = "wasmtime" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b310b9d20fcf59385761d1ade7a3ef06aecc380e3d3172035b919eaf7465d9f7" +checksum = "8bbb8a082a8ef50f7eeb8b82dda9709ef1e68963ea3c94e45581644dd4041835" dependencies = [ "anyhow", "backtrace", @@ 
-11234,9 +11492,9 @@ dependencies = [ "region", "rustc-demangle", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.12.0", - "wasmparser 0.78.2", + "wasmparser 0.79.0", "wasmtime-cache", "wasmtime-environ", "wasmtime-jit", @@ -11247,9 +11505,9 @@ dependencies = [ [[package]] name = "wasmtime-cache" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d14d500d5c3dc5f5c097158feee123d64b3097f0d836a2a27dff9c761c73c843" +checksum = "d73391579ca7f24573138ef768b73b2aed5f9d542385c64979b65d60d0912399" dependencies = [ "anyhow", "base64 0.13.0", @@ -11268,78 +11526,78 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c525b39f062eada7db3c1298287b96dcb6e472b9f6b22501300b28d9fa7582f6" +checksum = "81c6f5ae9205382345c7cd7454932a906186836999a2161c385e38a15f52e1fe" dependencies = [ - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", - "cranelift-frontend 0.74.0", + "cranelift-codegen 0.76.0", + "cranelift-entity 0.76.0", + "cranelift-frontend 0.76.0", "cranelift-wasm", "target-lexicon 0.12.0", - "wasmparser 0.78.2", + "wasmparser 0.79.0", "wasmtime-environ", ] [[package]] name = "wasmtime-debug" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d2a763e7a6fc734218e0e463196762a4f409c483063d81e0e85f96343b2e0a" +checksum = "c69e08f55e12f15f50b1b533bc3626723e7224254a065de6576934c86258c9e8" dependencies = [ "anyhow", - "gimli 0.24.0", + "gimli 0.25.0", "more-asserts", - "object 0.24.0", + "object 0.26.0", "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.78.2", + "wasmparser 0.79.0", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f64d0c2d881c31b0d65c1f2695e022d71eb60b9fbdd336aacca28208b58eac90" +checksum = "005d93174040af37fb8625f891cd9827afdad314261f7ec4ee61ec497d6e9d3c" dependencies = [ "cfg-if 1.0.0", - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", + "cranelift-codegen 0.76.0", + "cranelift-entity 0.76.0", "cranelift-wasm", - "gimli 0.24.0", + "gimli 0.25.0", "indexmap", "log 0.4.14", "more-asserts", "serde", "thiserror", - "wasmparser 0.78.2", + "wasmparser 0.79.0", ] [[package]] name = "wasmtime-jit" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4539ea734422b7c868107e2187d7746d8affbcaa71916d72639f53757ad707" +checksum = "d0bf1dfb213a35d8f21aefae40e597fe72778a907011ffdff7affb029a02af9a" dependencies = [ - "addr2line 0.15.1", + "addr2line", "anyhow", "cfg-if 1.0.0", - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", - "cranelift-frontend 0.74.0", + "cranelift-codegen 0.76.0", + "cranelift-entity 0.76.0", + "cranelift-frontend 0.76.0", "cranelift-native", "cranelift-wasm", - "gimli 0.24.0", + "gimli 0.25.0", "log 0.4.14", "more-asserts", - "object 0.24.0", + "object 0.26.0", "rayon", "region", "serde", "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.78.2", + "wasmparser 0.79.0", "wasmtime-cranelift", "wasmtime-debug", "wasmtime-environ", @@ -11351,13 +11609,13 @@ dependencies = [ [[package]] name = "wasmtime-obj" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1a8ff85246d091828e2225af521a6208ed28c997bb5c39eb697366dc2e2f2b" +checksum = "d231491878e710c68015228c9f9fc5955fe5c96dbf1485c15f7bed55b622c83c" dependencies = [ "anyhow", "more-asserts", - "object 0.24.0", + "object 0.26.0", "target-lexicon 0.12.0", "wasmtime-debug", "wasmtime-environ", @@ -11365,14 +11623,17 @@ dependencies = [ [[package]] name = "wasmtime-profiling" -version = "0.27.0" +version = "0.29.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24364d522dcd67c897c8fffc42e5bdfc57207bbb6d7eeade0da9d4a7d70105b" +checksum = "21486cfb5255c2069666c1f116f9e949d4e35c9a494f11112fa407879e42198d" dependencies = [ "anyhow", "cfg-if 1.0.0", + "gimli 0.25.0", "lazy_static", "libc", + "object 0.26.0", + "scroll", "serde", "target-lexicon 0.12.0", "wasmtime-environ", @@ -11381,9 +11642,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "0.27.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51e57976e8a19a18a18e002c6eb12e5769554204238e47ff155fda1809ef0f7" +checksum = "d7ddfdf32e0a20d81f48be9dacd31612bc61de5a174d1356fef806d300f507de" dependencies = [ "anyhow", "backtrace", @@ -11632,18 +11893,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.6.1+zstd.1.4.9" +version = "0.9.0+zstd.1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" +checksum = "07749a5dc2cb6b36661290245e350f15ec3bbb304e493db54a1d354480522ccd" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "3.0.1+zstd.1.4.9" +version = "4.1.1+zstd.1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" +checksum = "c91c90f2c593b003603e5e0493c837088df4469da25aafff8bce42ba48caf079" dependencies = [ "libc", "zstd-sys", @@ -11651,9 +11912,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.20+zstd.1.4.9" +version = "1.6.1+zstd.1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" +checksum = "615120c7a2431d16cf1cf979e7fc31ba7a5b5e5707b29c8a99e5dbf8a8392a33" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index bca0c816217ee..71473a4bc5689 100644 --- a/Cargo.toml +++ b/Cargo.toml 
@@ -19,6 +19,8 @@ members = [ "client/api", "client/authority-discovery", "client/basic-authorship", + "client/beefy", + "client/beefy/rpc", "client/block-builder", "client/chain-spec", "client/chain-spec/derive", @@ -69,6 +71,9 @@ members = [ "frame/authorship", "frame/babe", "frame/balances", + "frame/beefy", + "frame/beefy-mmr", + "frame/beefy-mmr/primitives", "frame/benchmarking", "frame/bounties", "frame/collective", @@ -128,6 +133,7 @@ members = [ "frame/uniques", "frame/utility", "frame/vesting", + "frame/bags-list", "primitives/api", "primitives/api/proc-macro", "primitives/api/test", @@ -137,6 +143,7 @@ members = [ "primitives/arithmetic/fuzzer", "primitives/authority-discovery", "primitives/authorship", + "primitives/beefy", "primitives/block-builder", "primitives/blockchain", "primitives/consensus/aura", @@ -198,6 +205,8 @@ members = [ "utils/frame/try-runtime/cli", "utils/frame/rpc/support", "utils/frame/rpc/system", + "utils/frame/generate-bags", + "utils/frame/generate-bags/node-runtime", "utils/prometheus", "utils/wasm-builder", ] diff --git a/bin/node-template/README.md b/bin/node-template/README.md index cd977fac84493..e618b13c2a2f0 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -1,10 +1,21 @@ # Substrate Node Template +[![Try on playground](https://img.shields.io/badge/Playground-Node_Template-brightgreen?logo=Parity%20Substrate)](https://playground.substrate.dev/?deploy=node-template) [![Matrix](https://img.shields.io/matrix/substrate-technical:matrix.org)](https://matrix.to/#/#substrate-technical:matrix.org) + A fresh FRAME-based [Substrate](https://www.substrate.io/) node, ready for hacking :rocket: ## Getting Started -Follow these steps to get started with the Node Template :hammer_and_wrench: +Follow the steps below to get started with the Node Template, or get it up and running right from +your browser in just a few clicks using [Playground](https://playground.substrate.dev/) +:hammer_and_wrench: + +### Using 
Nix + +Install [nix](https://nixos.org/) and optionally [direnv](https://github.com/direnv/direnv) and +[lorri](https://github.com/target/lorri) for a fully plug and play experience for setting up the +development environment. To get all the correct dependencies activate direnv `direnv allow` and +lorri `lorri shell`. ### Rust Setup @@ -62,10 +73,17 @@ Start the development chain with detailed logging: RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev ``` +### Connect with Polkadot-JS Apps Front-end + +Once the node template is running locally, you can connect it with **Polkadot-JS Apps** front-end +to interact with your chain. [Click +here](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944) connecting the Apps to your +local node template. + ### Multi-Node Local Testnet -If you want to see the multi-node consensus algorithm in action, refer to -[our Start a Private Network tutorial](https://substrate.dev/docs/en/tutorials/start-a-private-network/). +If you want to see the multi-node consensus algorithm in action, refer to our +[Start a Private Network tutorial](https://substrate.dev/docs/en/tutorials/start-a-private-network/). ## Template Structure @@ -77,34 +95,34 @@ directories. A blockchain node is an application that allows users to participate in a blockchain network. Substrate-based blockchain nodes expose a number of capabilities: -- Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the - nodes in the network to communicate with one another. -- Consensus: Blockchains must have a way to come to - [consensus](https://substrate.dev/docs/en/knowledgebase/advanced/consensus) on the state of the - network. Substrate makes it possible to supply custom consensus engines and also ships with - several consensus mechanisms that have been built on top of - [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). 
-- RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. +- Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the + nodes in the network to communicate with one another. +- Consensus: Blockchains must have a way to come to + [consensus](https://substrate.dev/docs/en/knowledgebase/advanced/consensus) on the state of the + network. Substrate makes it possible to supply custom consensus engines and also ships with + several consensus mechanisms that have been built on top of + [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). +- RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. There are several files in the `node` directory - take special note of the following: -- [`chain_spec.rs`](./node/src/chain_spec.rs): A - [chain specification](https://substrate.dev/docs/en/knowledgebase/integrate/chain-spec) is a - source code file that defines a Substrate chain's initial (genesis) state. Chain specifications - are useful for development and testing, and critical when architecting the launch of a - production chain. Take note of the `development_config` and `testnet_genesis` functions, which - are used to define the genesis state for the local development chain configuration. These - functions identify some - [well-known accounts](https://substrate.dev/docs/en/knowledgebase/integrate/subkey#well-known-keys) - and use them to configure the blockchain's initial state. -- [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of - the libraries that this file imports and the names of the functions it invokes. 
In particular, - there are references to consensus-related topics, such as the - [longest chain rule](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#longest-chain-rule), - the [Aura](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#aura) block authoring - mechanism and the - [GRANDPA](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#grandpa) finality - gadget. +- [`chain_spec.rs`](./node/src/chain_spec.rs): A + [chain specification](https://substrate.dev/docs/en/knowledgebase/integrate/chain-spec) is a + source code file that defines a Substrate chain's initial (genesis) state. Chain specifications + are useful for development and testing, and critical when architecting the launch of a + production chain. Take note of the `development_config` and `testnet_genesis` functions, which + are used to define the genesis state for the local development chain configuration. These + functions identify some + [well-known accounts](https://substrate.dev/docs/en/knowledgebase/integrate/subkey#well-known-keys) + and use them to configure the blockchain's initial state. +- [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of + the libraries that this file imports and the names of the functions it invokes. In particular, + there are references to consensus-related topics, such as the + [longest chain rule](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#longest-chain-rule), + the [Aura](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#aura) block authoring + mechanism and the + [GRANDPA](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#grandpa) finality + gadget. 
After the node has been [built](#build), refer to the embedded documentation to learn more about the capabilities and configuration parameters that it exposes: @@ -130,13 +148,13 @@ create pallets and flexibly compose them to create blockchains that can address Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following: -- This file configures several pallets to include in the runtime. Each pallet configuration is - defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. -- The pallets are composed into a single runtime by way of the - [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) - macro, which is part of the core - [FRAME Support](https://substrate.dev/docs/en/knowledgebase/runtime/frame#support-library) - library. +- This file configures several pallets to include in the runtime. Each pallet configuration is + defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. +- The pallets are composed into a single runtime by way of the + [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) + macro, which is part of the core + [FRAME Support](https://substrate.dev/docs/en/knowledgebase/runtime/frame#support-library) + library. ### Pallets @@ -146,17 +164,17 @@ template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs A FRAME pallet is compromised of a number of blockchain primitives: -- Storage: FRAME defines a rich set of powerful - [storage abstractions](https://substrate.dev/docs/en/knowledgebase/runtime/storage) that makes - it easy to use Substrate's efficient key-value database to manage the evolving state of a - blockchain. -- Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) - from outside of the runtime in order to update its state. 
-- Events: Substrate uses [events](https://substrate.dev/docs/en/knowledgebase/runtime/events) to - notify users of important changes in the runtime. -- Errors: When a dispatchable fails, it returns an error. -- Config: The `Config` configuration interface is used to define the types and parameters upon - which a FRAME pallet depends. +- Storage: FRAME defines a rich set of powerful + [storage abstractions](https://substrate.dev/docs/en/knowledgebase/runtime/storage) that makes + it easy to use Substrate's efficient key-value database to manage the evolving state of a + blockchain. +- Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) + from outside of the runtime in order to update its state. +- Events: Substrate uses [events](https://substrate.dev/docs/en/knowledgebase/runtime/events) to + notify users of important changes in the runtime. +- Errors: When a dispatchable fails, it returns an error. +- Config: The `Config` configuration interface is used to define the types and parameters upon + which a FRAME pallet depends. ### Run in Docker @@ -170,7 +188,8 @@ Then run the following command to start a single node development chain. ``` This command will firstly compile your code, and then start a local development network. You can -also replace the default command (`cargo build --release && ./target/release/node-template --dev --ws-external`) +also replace the default command +(`cargo build --release && ./target/release/node-template --dev --ws-external`) by appending your own. A few useful ones are as follow. ```bash diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 04d70b338ac04..c8f2b52f112f5 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "node-template" version = "3.0.0" -authors = ["Substrate DevHub "] description = "A fresh FRAME-based Substrate node, ready for hacking." 
+authors = ["Substrate DevHub "] +homepage = "https://substrate.dev" edition = "2018" license = "Unlicense" -build = "build.rs" -homepage = "https://substrate.dev" -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" +build = "build.rs" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index 8b551051c1b19..8ed1d35ba5f92 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -35,7 +35,7 @@ pub enum Subcommand { /// Revert the chain to a previous state. Revert(sc_cli::RevertCmd), - /// The custom benchmark subcommmand benchmarking runtime pallets. + /// The custom benchmark subcommand benchmarking runtime pallets. #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index b3eb747625b4f..dccb37c6540c3 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -1,14 +1,13 @@ [package] -authors = ['Substrate DevHub '] -edition = '2018' name = 'pallet-template' version = "3.0.0" -license = "Unlicense" -homepage = "https://substrate.dev" -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" description = "FRAME pallet template for defining custom runtime logic." 
-readme = "README.md" +authors = ['Substrate DevHub '] +homepage = "https://substrate.dev" +edition = '2018' +license = "Unlicense" publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/pallets/template/src/benchmarking.rs b/bin/node-template/pallets/template/src/benchmarking.rs index 2117c048cfbdb..d496a9fc89b1a 100644 --- a/bin/node-template/pallets/template/src/benchmarking.rs +++ b/bin/node-template/pallets/template/src/benchmarking.rs @@ -4,7 +4,7 @@ use super::*; #[allow(unused)] use crate::Pallet as Template; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_system::RawOrigin; benchmarks! { @@ -15,6 +15,6 @@ benchmarks! { verify { assert_eq!(Something::::get(), Some(s)); } -} -impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 47e67af2b9ae1..081e458b18e08 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "node-template-runtime" version = "3.0.0" +description = 'A fresh FRAME-based Substrate runtime, ready for hacking.' 
authors = ["Substrate DevHub "] +homepage = "https://substrate.dev" edition = "2018" license = "Unlicense" -homepage = "https://substrate.dev" -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index eecc93e166666..ca6e6b1822d45 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -222,6 +222,7 @@ impl pallet_grandpa::Config for Runtime { type HandleEquivocation = (); type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } parameter_types! { diff --git a/bin/node-template/shell.nix b/bin/node-template/shell.nix new file mode 100644 index 0000000000000..c08005c1630e9 --- /dev/null +++ b/bin/node-template/shell.nix @@ -0,0 +1,35 @@ +let + mozillaOverlay = + import (builtins.fetchGit { + url = "https://github.com/mozilla/nixpkgs-mozilla.git"; + rev = "57c8084c7ef41366993909c20491e359bbb90f54"; + }); + pinned = builtins.fetchGit { + # Descriptive name to make the store path easier to identify + url = "https://github.com/nixos/nixpkgs/"; + # Commit hash for nixos-unstable as of 2020-04-26 + # `git ls-remote https://github.com/nixos/nixpkgs nixos-unstable` + ref = "refs/heads/nixos-unstable"; + rev = "1fe6ed37fd9beb92afe90671c0c2a662a03463dd"; + }; + nixpkgs = import pinned { overlays = [ mozillaOverlay ]; }; + toolchain = with nixpkgs; (rustChannelOf { date = "2021-09-14"; channel = "nightly"; }); + rust-wasm = toolchain.rust.override { + targets = [ "wasm32-unknown-unknown" ]; + }; +in +with nixpkgs; pkgs.mkShell { + buildInputs = [ + clang + pkg-config + rust-wasm + ] ++ stdenv.lib.optionals stdenv.isDarwin [ + darwin.apple_sdk.frameworks.Security + ]; + + LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; + PROTOC = 
"${protobuf}/bin/protoc"; + RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library/"; + ROCKSDB_LIB_DIR = "${rocksdb}/lib"; + +} diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index 1532e02bd3ef6..ca1a1c18f9ea9 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -30,8 +30,8 @@ use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc}; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolFuture, PoolStatus, TransactionFor, TransactionSource, - TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_consensus::{Environment, Proposer}; use sp_inherents::InherentDataProvider; @@ -216,6 +216,19 @@ impl sc_transaction_pool_api::InPoolTransaction for PoolTransaction { #[derive(Clone, Debug)] pub struct Transactions(Vec>); +pub struct TransactionsIterator(std::vec::IntoIter>); + +impl Iterator for TransactionsIterator { + type Item = Arc; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl ReadyTransactions for TransactionsIterator { + fn report_invalid(&mut self, _tx: &Self::Item) {} +} impl sc_transaction_pool_api::TransactionPool for Transactions { type Block = Block; @@ -257,16 +270,17 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { _at: NumberFor, ) -> Pin< Box< - dyn Future> + Send>> - + Send, + dyn Future< + Output = Box> + Send>, + > + Send, >, > { - let iter: Box> + Send> = - Box::new(self.0.clone().into_iter()); + let iter: Box> + Send> = + Box::new(TransactionsIterator(self.0.clone().into_iter())); Box::pin(futures::future::ready(iter)) } - fn ready(&self) -> Box> + Send> { + fn ready(&self) -> Box> + Send> { unimplemented!() } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 
6a12af4b278b7..1fc7a38b23ca8 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -122,8 +122,12 @@ nix = "0.19" serde_json = "1.0" regex = "1" platforms = "1.1" -async-std = { version = "1.6.5", features = ["attributes"] } +async-std = { version = "1.10.0", features = ["attributes"] } soketto = "0.4.2" +jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = ["tokio1"] } +tokio = { version = "1.10", features = ["macros", "time"] } +wait-timeout = "0.2" +remote-externalities = { path = "../../../utils/frame/remote-externalities" } [build-dependencies] structopt = { version = "0.3.8", optional = true } @@ -135,7 +139,7 @@ try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../ut sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", optional = true } [features] -default = [ "cli" ] +default = ["cli"] cli = [ "node-executor/wasmi-errno", "node-inspect", diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 352e007a891ba..b5e36d9b53629 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -265,7 +265,7 @@ pub fn testnet_genesis( .map(|x| &x.0) .chain(initial_nominators.iter()) .for_each(|x| { - if !endowed_accounts.contains(&x) { + if !endowed_accounts.contains(x) { endowed_accounts.push(x.clone()) } }); @@ -361,6 +361,7 @@ pub fn testnet_genesis( max_members: 999, }, vesting: Default::default(), + assets: Default::default(), gilt: Default::default(), transaction_storage: Default::default(), } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index acc7df5b1e5a3..b1a3bd4722597 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -86,7 +86,7 @@ pub fn new_partial( let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( - &config, + config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, )?; @@ -277,7 +277,7 @@ pub fn new_full_base( let 
_rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { config, - backend: backend.clone(), + backend, client: client.clone(), keystore: keystore_container.sync_keystore(), network: network.clone(), @@ -507,7 +507,7 @@ pub fn new_light_base( babe_block_import, Some(Box::new(justification_import)), client.clone(), - select_chain.clone(), + select_chain, move |_, ()| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index 707fd217e33e8..216bcc6d9fc13 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -24,11 +24,11 @@ use tempfile::tempdir; pub mod common; -#[test] -fn check_block_works() { +#[tokio::test] +async fn check_block_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 54b9c749bf1de..85effc858e155 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -23,58 +23,115 @@ use nix::{ sys::signal::{kill, Signal::SIGINT}, unistd::Pid, }; +use node_primitives::Block; +use remote_externalities::rpc_api; use std::{ convert::TryInto, + ops::{Deref, DerefMut}, path::Path, process::{Child, Command, ExitStatus}, - thread, time::Duration, }; +use tokio::time::timeout; + +static LOCALHOST_WS: &str = "ws://127.0.0.1:9944/"; /// Wait for the given `child` the given number of `secs`. /// /// Returns the `Some(exit status)` or `None` if the process did not finish in the given time. 
-pub fn wait_for(child: &mut Child, secs: usize) -> Option { - for i in 0..secs { - match child.try_wait().unwrap() { - Some(status) => { - if i > 5 { - eprintln!("Child process took {} seconds to exit gracefully", i); - } - return Some(status) - }, - None => thread::sleep(Duration::from_secs(1)), +pub fn wait_for(child: &mut Child, secs: u64) -> Result { + let result = wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(5.min(secs))) + .map_err(|_| ())?; + if let Some(exit_status) = result { + Ok(exit_status) + } else { + if secs > 5 { + eprintln!("Child process taking over 5 seconds to exit gracefully"); + let result = wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(secs - 5)) + .map_err(|_| ())?; + if let Some(exit_status) = result { + return Ok(exit_status) + } } + eprintln!("Took too long to exit (> {} seconds). Killing...", secs); + let _ = child.kill(); + child.wait().unwrap(); + Err(()) } - eprintln!("Took too long to exit (> {} seconds). Killing...", secs); - let _ = child.kill(); - child.wait().unwrap(); +} + +/// Wait for at least n blocks to be finalized within a specified time. 
+pub async fn wait_n_finalized_blocks( + n: usize, + timeout_secs: u64, +) -> Result<(), tokio::time::error::Elapsed> { + timeout(Duration::from_secs(timeout_secs), wait_n_finalized_blocks_from(n, LOCALHOST_WS)).await +} + +/// Wait for at least n blocks to be finalized from a specified node +pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { + let mut built_blocks = std::collections::HashSet::new(); + let mut interval = tokio::time::interval(Duration::from_secs(2)); - None + loop { + if let Ok(block) = rpc_api::get_finalized_head::(url.to_string()).await { + built_blocks.insert(block); + if built_blocks.len() > n { + break + } + }; + interval.tick().await; + } } -/// Run the node for a while (30 seconds) -pub fn run_node_for_a_while(base_path: &Path, args: &[&str]) { +/// Run the node for a while (3 blocks) +pub async fn run_node_for_a_while(base_path: &Path, args: &[&str]) { let mut cmd = Command::new(cargo_bin("substrate")); - let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap(); + let mut child = KillChildOnDrop(cmd.args(args).arg("-d").arg(base_path).spawn().unwrap()); // Let it produce some blocks. 
- thread::sleep(Duration::from_secs(30)); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); + let _ = wait_n_finalized_blocks(3, 30).await; + + assert!(child.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); + kill(Pid::from_raw(child.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(wait_for(&mut child, 40).map(|x| x.success()).unwrap()); } /// Run the node asserting that it fails with an error pub fn run_node_assert_fail(base_path: &Path, args: &[&str]) { let mut cmd = Command::new(cargo_bin("substrate")); - let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap(); + let mut child = KillChildOnDrop(cmd.args(args).arg("-d").arg(base_path).spawn().unwrap()); - // Let it produce some blocks. - thread::sleep(Duration::from_secs(10)); - assert!(cmd.try_wait().unwrap().is_some(), "the process should not be running anymore"); + // Let it produce some blocks, but it should die within 10 seconds. 
+ assert_ne!( + wait_timeout::ChildExt::wait_timeout(&mut *child, Duration::from_secs(10)).unwrap(), + None, + "the process should not be running anymore" + ); +} + +pub struct KillChildOnDrop(pub Child); + +impl Drop for KillChildOnDrop { + fn drop(&mut self) { + let _ = self.0.kill(); + } +} + +impl Deref for KillChildOnDrop { + type Target = Child; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for KillChildOnDrop { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } diff --git a/bin/node/cli/tests/database_role_subdir_migration.rs b/bin/node/cli/tests/database_role_subdir_migration.rs index 516908111ae72..9338d8a8e4f43 100644 --- a/bin/node/cli/tests/database_role_subdir_migration.rs +++ b/bin/node/cli/tests/database_role_subdir_migration.rs @@ -25,9 +25,9 @@ use tempfile::tempdir; pub mod common; -#[test] +#[tokio::test] #[cfg(unix)] -fn database_role_subdir_migration() { +async fn database_role_subdir_migration() { type Block = RawBlock>; let base_path = tempdir().expect("could not create a temp dir"); @@ -62,7 +62,8 @@ fn database_role_subdir_migration() { "44445", "--no-prometheus", ], - ); + ) + .await; // check if the database dir had been migrated assert!(!path.join("db_version").exists()); diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index 7cbaa152699b4..937f03b8e5dae 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -182,13 +182,13 @@ impl<'a> ExportImportRevertExecutor<'a> { } } -#[test] -fn export_import_revert() { +#[tokio::test] +async fn export_import_revert() { let base_path = tempdir().expect("could not create a temp dir"); let exported_blocks_file = base_path.path().join("exported_blocks"); let db_path = base_path.path().join("db"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let mut executor = 
ExportImportRevertExecutor::new(&base_path, &exported_blocks_file, &db_path); diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index 2a89801547a4b..6f980d2acbfcb 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -24,11 +24,11 @@ use tempfile::tempdir; pub mod common; -#[test] -fn inspect_works() { +#[tokio::test] +async fn inspect_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 0f16a51e5d0a4..8a8601c863d95 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -22,12 +22,12 @@ use tempfile::tempdir; pub mod common; -#[test] +#[tokio::test] #[cfg(unix)] -fn purge_chain_works() { +async fn purge_chain_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["purge-chain", "--dev", "-d"]) diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index 03a1826f2f080..fc5094c2d722f 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -17,7 +17,6 @@ // along with this program. If not, see . 
#![cfg(unix)] - use assert_cmd::cargo::cargo_bin; use nix::{ sys::signal::{ @@ -26,67 +25,43 @@ use nix::{ }, unistd::Pid, }; -use sc_service::Deref; use std::{ convert::TryInto, - ops::DerefMut, process::{Child, Command}, - thread, - time::Duration, }; use tempfile::tempdir; pub mod common; -#[test] -fn running_the_node_works_and_can_be_interrupted() { - fn run_command_and_kill(signal: Signal) { +#[tokio::test] +async fn running_the_node_works_and_can_be_interrupted() { + async fn run_command_and_kill(signal: Signal) { let base_path = tempdir().expect("could not create a temp dir"); - let mut cmd = Command::new(cargo_bin("substrate")) - .args(&["--dev", "-d"]) - .arg(base_path.path()) - .spawn() - .unwrap(); + let mut cmd = common::KillChildOnDrop( + Command::new(cargo_bin("substrate")) + .args(&["--dev", "-d"]) + .arg(base_path.path()) + .spawn() + .unwrap(), + ); - thread::sleep(Duration::from_secs(20)); + common::wait_n_finalized_blocks(3, 30).await.unwrap(); assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); assert_eq!( common::wait_for(&mut cmd, 30).map(|x| x.success()), - Some(true), + Ok(true), "the process must exit gracefully after signal {}", signal, ); } - run_command_and_kill(SIGINT); - run_command_and_kill(SIGTERM); -} - -struct KillChildOnDrop(Child); - -impl Drop for KillChildOnDrop { - fn drop(&mut self) { - let _ = self.0.kill(); - } -} - -impl Deref for KillChildOnDrop { - type Target = Child; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for KillChildOnDrop { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } + run_command_and_kill(SIGINT).await; + run_command_and_kill(SIGTERM).await; } -#[test] -fn running_two_nodes_with_the_same_ws_port_should_work() { +#[tokio::test] +async fn running_two_nodes_with_the_same_ws_port_should_work() { fn start_node() -> Child { Command::new(cargo_bin("substrate")) 
.args(&["--dev", "--tmp", "--ws-port=45789"]) @@ -94,10 +69,10 @@ fn running_two_nodes_with_the_same_ws_port_should_work() { .unwrap() } - let mut first_node = KillChildOnDrop(start_node()); - let mut second_node = KillChildOnDrop(start_node()); + let mut first_node = common::KillChildOnDrop(start_node()); + let mut second_node = common::KillChildOnDrop(start_node()); - thread::sleep(Duration::from_secs(30)); + let _ = common::wait_n_finalized_blocks(3, 30).await; assert!(first_node.try_wait().unwrap().is_none(), "The first node should still be running"); assert!(second_node.try_wait().unwrap().is_none(), "The second node should still be running"); @@ -107,12 +82,12 @@ fn running_two_nodes_with_the_same_ws_port_should_work() { assert_eq!( common::wait_for(&mut first_node, 30).map(|x| x.success()), - Some(true), + Ok(true), "The first node must exit gracefully", ); assert_eq!( common::wait_for(&mut second_node, 30).map(|x| x.success()), - Some(true), + Ok(true), "The second node must exit gracefully", ); } diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index c107740b9b0a5..5d8e6c9ec4539 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -29,37 +29,34 @@ use std::{ io::Read, path::PathBuf, process::{Command, Stdio}, - thread, - time::Duration, }; pub mod common; -#[test] -fn temp_base_path_works() { +#[tokio::test] +async fn temp_base_path_works() { let mut cmd = Command::new(cargo_bin("substrate")); - - let mut cmd = cmd - .args(&["--dev", "--tmp"]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn() - .unwrap(); + let mut child = common::KillChildOnDrop( + cmd.args(&["--dev", "--tmp"]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .unwrap(), + ); // Let it produce some blocks. 
- thread::sleep(Duration::from_secs(30)); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); + common::wait_n_finalized_blocks(3, 30).await.unwrap(); + assert!(child.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); + kill(Pid::from_raw(child.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(common::wait_for(&mut child, 40).map(|x| x.success()).unwrap_or_default()); // Ensure the database has been deleted let mut stderr = String::new(); - cmd.stderr.unwrap().read_to_string(&mut stderr).unwrap(); + child.stderr.as_mut().unwrap().read_to_string(&mut stderr).unwrap(); let re = Regex::new(r"Database: .+ at (\S+)").unwrap(); - let db_path = - PathBuf::from(re.captures(stderr.as_str()).unwrap().get(1).unwrap().as_str().to_string()); + let db_path = PathBuf::from(re.captures(stderr.as_str()).unwrap().get(1).unwrap().as_str()); assert!(!db_path.exists()); } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index dafd9db8bab96..d434be8f3c609 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -55,6 +55,7 @@ pallet-assets = { version = "4.0.0-dev", default-features = false, path = "../.. 
pallet-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/authority-discovery" } pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../../../frame/authorship" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../../frame/babe" } +pallet-bags-list = { version = "4.0.0-dev", default-features = false, path = "../../../frame/bags-list" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../../../frame/bounties" } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } @@ -110,6 +111,7 @@ std = [ "pallet-authorship/std", "sp-consensus-babe/std", "pallet-babe/std", + "pallet-bags-list/std", "pallet-balances/std", "pallet-bounties/std", "sp-block-builder/std", @@ -179,6 +181,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-bounties/runtime-benchmarks", "pallet-collective/runtime-benchmarks", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 7c6475bd18d6a..881641e15136c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -92,6 +92,9 @@ pub mod constants; use constants::{currency::*, time::*}; use sp_runtime::generic::Era; +/// Generated voter bag information. +mod voter_bags; + // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); @@ -373,6 +376,7 @@ impl pallet_babe::Config for Runtime { pallet_babe::EquivocationHandler; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } parameter_types! 
{ @@ -525,6 +529,9 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainSequentialPhragmen; + // Alternatively, use pallet_staking::UseNominatorsMap to just use the nominators map. + // Note that the aforementioned does not scale to a very large number of nominators. + type SortedListProvider = BagsList; type WeightInfo = pallet_staking::weights::SubstrateWeight; } @@ -552,6 +559,11 @@ parameter_types! { *RuntimeBlockLength::get() .max .get(DispatchClass::Normal); + + // BagsList allows a practically unbounded count of nominators to participate in NPoS elections. + // To ensure we respect memory limits when using the BagsList this must be set to a number of + // voters we know can fit into a single vec allocation. + pub const VoterSnapshotPerBlock: u32 = 10_000; } sp_npos_elections::generate_solution_type!( @@ -565,17 +577,18 @@ sp_npos_elections::generate_solution_type!( pub const MAX_NOMINATIONS: u32 = ::LIMIT as u32; -/// The numbers configured here should always be more than the the maximum limits of staking pallet -/// to ensure election snapshot will not run out of memory. +/// The numbers configured here could always be more than the the maximum limits of staking pallet +/// to ensure election snapshot will not run out of memory. For now, we set them to smaller values +/// since the staking is bounded and the weight pipeline takes hours for this single pallet. 
pub struct BenchmarkConfig; impl pallet_election_provider_multi_phase::BenchmarkingConfig for BenchmarkConfig { - const VOTERS: [u32; 2] = [5_000, 10_000]; - const TARGETS: [u32; 2] = [1_000, 2_000]; - const ACTIVE_VOTERS: [u32; 2] = [1000, 4_000]; - const DESIRED_TARGETS: [u32; 2] = [400, 800]; - const SNAPSHOT_MAXIMUM_VOTERS: u32 = 25_000; - const MINER_MAXIMUM_VOTERS: u32 = 15_000; - const MAXIMUM_TARGETS: u32 = 2000; + const VOTERS: [u32; 2] = [1000, 2000]; + const TARGETS: [u32; 2] = [500, 1000]; + const ACTIVE_VOTERS: [u32; 2] = [500, 800]; + const DESIRED_TARGETS: [u32; 2] = [200, 400]; + const SNAPSHOT_MAXIMUM_VOTERS: u32 = 1000; + const MINER_MAXIMUM_VOTERS: u32 = 1000; + const MAXIMUM_TARGETS: u32 = 300; } /// Maximum number of iterations for balancing that will be executed in the embedded OCW @@ -634,6 +647,18 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; type ForceOrigin = EnsureRootOrHalfCouncil; type BenchmarkingConfig = BenchmarkConfig; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; +} + +parameter_types! { + pub const BagThresholds: &'static [u64] = &voter_bags::THRESHOLDS; +} + +impl pallet_bags_list::Config for Runtime { + type Event = Event; + type VoteWeightProvider = Staking; + type WeightInfo = pallet_bags_list::weights::SubstrateWeight; + type BagThresholds = BagThresholds; } parameter_types! { @@ -909,6 +934,9 @@ parameter_types! { /// We prioritize im-online heartbeats over election solution submission. 
pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; pub const MaxAuthorities: u32 = 100; + pub const MaxKeys: u32 = 10_000; + pub const MaxPeerInHeartbeats: u32 = 10_000; + pub const MaxPeerDataEncodingSize: u32 = 1_000; } impl frame_system::offchain::CreateSignedTransaction for Runtime @@ -973,6 +1001,9 @@ impl pallet_im_online::Config for Runtime { type ReportUnresponsiveness = Offences; type UnsignedPriority = ImOnlineUnsignedPriority; type WeightInfo = pallet_im_online::weights::SubstrateWeight; + type MaxKeys = MaxKeys; + type MaxPeerInHeartbeats = MaxPeerInHeartbeats; + type MaxPeerDataEncodingSize = MaxPeerDataEncodingSize; } impl pallet_offences::Config for Runtime { @@ -1006,6 +1037,7 @@ impl pallet_grandpa::Config for Runtime { >; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } parameter_types! { @@ -1248,12 +1280,13 @@ construct_runtime!( Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, Tips: pallet_tips::{Pallet, Call, Storage, Event}, - Assets: pallet_assets::{Pallet, Call, Storage, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Event, Config}, Mmr: pallet_mmr::{Pallet, Storage}, Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, Gilt: pallet_gilt::{Pallet, Call, Storage, Event, Config}, Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, TransactionStorage: pallet_transaction_storage::{Pallet, Call, Storage, Inherent, Config, Event}, + BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, } ); @@ -1409,7 +1442,7 @@ impl_runtime_apis! { slot_duration: Babe::slot_duration(), epoch_length: EpochDuration::get(), c: BABE_GENESIS_EPOCH_CONFIG.c, - genesis_authorities: Babe::authorities(), + genesis_authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots, } @@ -1555,9 +1588,16 @@ impl_runtime_apis! 
{ #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade() -> Result<(Weight, Weight), sp_runtime::RuntimeString> { - let weight = Executive::try_runtime_upgrade()?; - Ok((weight, RuntimeBlockWeights::get().max_block)) + fn on_runtime_upgrade() -> (Weight, Weight) { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. If any of the pre/post migration checks fail, we shall stop + // right here and right now. + let weight = Executive::try_runtime_upgrade().unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block_no_check(block: Block) -> Weight { + Executive::execute_block_no_check(block) } } @@ -1581,6 +1621,7 @@ impl_runtime_apis! { list_benchmark!(list, extra, pallet_assets, Assets); list_benchmark!(list, extra, pallet_babe, Babe); + list_benchmark!(list, extra, pallet_bags_list, BagsList); list_benchmark!(list, extra, pallet_balances, Balances); list_benchmark!(list, extra, pallet_bounties, Bounties); list_benchmark!(list, extra, pallet_collective, Council); @@ -1655,6 +1696,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_assets, Assets); add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_bags_list, BagsList); add_benchmark!(params, batches, pallet_bounties, Bounties); add_benchmark!(params, batches, pallet_collective, Council); add_benchmark!(params, batches, pallet_contracts, Contracts); diff --git a/bin/node/runtime/src/voter_bags.rs b/bin/node/runtime/src/voter_bags.rs new file mode 100644 index 0000000000000..c4c731a58badc --- /dev/null +++ b/bin/node/runtime/src/voter_bags.rs @@ -0,0 +1,235 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated voter bag thresholds. +//! +//! Generated on 2021-07-05T09:17:40.469754927+00:00 +//! for the node runtime. + +/// Existential weight for this runtime. +#[cfg(any(test, feature = "std"))] +#[allow(unused)] +pub const EXISTENTIAL_WEIGHT: u64 = 100_000_000_000_000; + +/// Constant ratio between bags for this runtime. +#[cfg(any(test, feature = "std"))] +#[allow(unused)] +pub const CONSTANT_RATIO: f64 = 1.0628253590743408; + +/// Upper thresholds delimiting the bag list. 
+pub const THRESHOLDS: [u64; 200] = [ + 100_000_000_000_000, + 106_282_535_907_434, + 112_959_774_389_150, + 120_056_512_776_105, + 127_599_106_300_477, + 135_615_565_971_369, + 144_135_662_599_590, + 153_191_037_357_827, + 162_815_319_286_803, + 173_044_250_183_800, + 183_915_817_337_347, + 195_470_394_601_017, + 207_750_892_330_229, + 220_802_916_738_890, + 234_674_939_267_673, + 249_418_476_592_914, + 265_088_281_944_639, + 281_742_548_444_211, + 299_443_125_216_738, + 318_255_747_080_822, + 338_250_278_668_647, + 359_500_973_883_001, + 382_086_751_654_776, + 406_091_489_025_036, + 431_604_332_640_068, + 458_720_029_816_222, + 487_539_280_404_019, + 518_169_110_758_247, + 550_723_271_202_866, + 585_322_658_466_782, + 622_095_764_659_305, + 661_179_154_452_653, + 702_717_972_243_610, + 746_866_481_177_808, + 793_788_636_038_393, + 843_658_692_126_636, + 896_661_852_395_681, + 952_994_955_240_703, + 1_012_867_205_499_736, + 1_076_500_951_379_881, + 1_144_132_510_194_192, + 1_216_013_045_975_769, + 1_292_409_502_228_280, + 1_373_605_593_276_862, + 1_459_902_857_901_004, + 1_551_621_779_162_291, + 1_649_102_974_585_730, + 1_752_708_461_114_642, + 1_862_822_999_536_805, + 1_979_855_523_374_646, + 2_104_240_657_545_975, + 2_236_440_332_435_128, + 2_376_945_499_368_703, + 2_526_277_953_866_680, + 2_684_992_273_439_945, + 2_853_677_877_130_641, + 3_032_961_214_443_876, + 3_223_508_091_799_862, + 3_426_026_145_146_232, + 3_641_267_467_913_124, + 3_870_031_404_070_482, + 4_113_167_516_660_186, + 4_371_578_742_827_277, + 4_646_224_747_067_156, + 4_938_125_485_141_739, + 5_248_364_991_899_922, + 5_578_095_407_069_235, + 5_928_541_253_969_291, + 6_301_003_987_036_955, + 6_696_866_825_051_405, + 7_117_599_888_008_300, + 7_564_765_656_719_910, + 8_040_024_775_416_580, + 8_545_142_218_898_723, + 9_081_993_847_142_344, + 9_652_573_371_700_016, + 10_258_999_759_768_490, + 10_903_525_103_419_522, + 11_588_542_983_217_942, + 12_316_597_357_287_042, + 13_090_392_008_832_678, + 
13_912_800_587_211_472, + 14_786_877_279_832_732, + 15_715_868_154_526_436, + 16_703_223_214_499_558, + 17_752_609_210_649_358, + 18_867_923_258_814_856, + 20_053_307_312_537_008, + 21_313_163_545_075_252, + 22_652_170_697_804_756, + 24_075_301_455_707_600, + 25_587_840_914_485_432, + 27_195_406_207_875_088, + 28_903_967_368_057_400, + 30_719_869_496_628_636, + 32_649_856_328_471_220, + 34_701_095_276_033_064, + 36_881_204_047_022_752, + 39_198_278_934_370_992, + 41_660_924_883_519_016, + 44_278_287_448_695_240, + 47_060_086_756_856_400, + 50_016_653_605_425_536, + 53_158_967_827_883_320, + 56_498_699_069_691_424, + 60_048_250_125_977_912, + 63_820_803_001_928_304, + 67_830_367_866_937_216, + 72_091_835_084_322_176, + 76_621_030_509_822_880, + 81_434_774_264_248_528, + 86_550_943_198_537_824, + 91_988_537_283_208_848, + 97_767_750_168_749_840, + 103_910_044_178_992_000, + 110_438_230_015_967_792, + 117_376_551_472_255_616, + 124_750_775_465_407_920, + 132_588_287_728_824_640, + 140_918_194_514_440_064, + 149_771_430_684_917_568, + 159_180_874_596_775_264, + 169_181_470_201_085_280, + 179_810_356_815_193_344, + 191_107_007_047_393_216, + 203_113_373_386_768_288, + 215_874_044_002_592_672, + 229_436_408_331_885_600, + 243_850_833_070_063_392, + 259_170_849_218_267_264, + 275_453_350_882_006_752, + 292_758_806_559_399_232, + 311_151_483_703_668_992, + 330_699_687_393_865_920, + 351_476_014_000_157_824, + 373_557_620_785_735_808, + 397_026_512_446_556_096, + 421_969_845_653_044_224, + 448_480_252_724_740_928, + 476_656_185_639_923_904, + 506_602_281_657_757_760, + 538_429_751_910_786_752, + 572_256_794_410_890_176, + 608_209_033_002_485_632, + 646_419_983_893_124_352, + 687_031_551_494_039_552, + 730_194_555_412_054_016, + 776_069_290_549_944_960, + 824_826_122_395_314_176, + 876_646_119_708_695_936, + 931_721_726_960_522_368, + 990_257_479_014_182_144, + 1_052_470_760_709_299_712, + 1_118_592_614_166_106_112, + 1_188_868_596_808_997_376, + 1_263_559_693_295_730_432, + 
1_342_943_284_738_898_688, + 1_427_314_178_819_094_784, + 1_516_985_704_615_302_400, + 1_612_290_876_218_400_768, + 1_713_583_629_449_105_408, + 1_821_240_136_273_157_632, + 1_935_660_201_795_120_128, + 2_057_268_749_018_809_600, + 2_186_517_396_888_336_384, + 2_323_886_137_470_138_880, + 2_469_885_118_504_583_168, + 2_625_056_537_947_004_416, + 2_789_976_657_533_970_944, + 2_965_257_942_852_572_160, + 3_151_551_337_860_326_400, + 3_349_548_682_302_620_672, + 3_559_985_281_005_267_968, + 3_783_642_634_583_792_128, + 4_021_351_341_710_503_936, + 4_273_994_183_717_548_544, + 4_542_509_402_991_247_872, + 4_827_894_187_332_742_144, + 5_131_208_373_224_844_288, + 5_453_578_381_757_959_168, + 5_796_201_401_831_965_696, + 6_160_349_836_169_256_960, + 6_547_376_026_650_146_816, + 6_958_717_276_519_173_120, + 7_395_901_188_113_309_696, + 7_860_551_335_934_872_576, + 8_354_393_296_137_270_272, + 8_879_261_054_815_360_000, + 9_437_103_818_898_946_048, + 10_029_993_254_943_105_024, + 10_660_131_182_698_121_216, + 11_329_857_752_030_707_712, + 12_041_660_133_563_240_448, + 12_798_181_755_305_525_248, + 13_602_232_119_581_272_064, + 14_456_797_236_706_498_560, + 15_365_050_714_167_523_328, + 16_330_365_542_480_556_032, + 17_356_326_621_502_140_416, + 18_446_744_073_709_551_615, +]; diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 50c1e6f9d20be..845227c5acee9 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -98,6 +98,7 @@ pub fn config_endowed( treasury: Default::default(), society: SocietyConfig { members: vec![alice(), bob()], pot: 0, max_members: 999 }, vesting: Default::default(), + assets: Default::default(), gilt: Default::default(), transaction_storage: Default::default(), } diff --git a/bin/utils/subkey/README.md b/bin/utils/subkey/README.md index fbb486247a770..2310c59f4a283 100644 --- a/bin/utils/subkey/README.md +++ b/bin/utils/subkey/README.md @@ -18,15 +18,37 @@ If you save any output of 
`subkey` into a file, make sure to apply proper permis The following guide explains *some* of the `subkey` commands. For the full list and the most up to date documentation, make sure to check the integrated help with `subkey --help`. +### Install with Cargo + +You will need to have the Substrate build dependencies to install Subkey. Use the following two commands to install the dependencies and Subkey, respectively: + +Command: + +```bash +# Use the `--fast` flag to get the dependencies without needing to install the Substrate and Subkey binary +curl https://getsubstrate.io -sSf | bash -s -- --fast +# Install only `subkey`, at a specific version of the subkey crate +cargo install --force subkey --git https://github.com/paritytech/substrate --version --locked +``` + +### Run in a container + +```bash +# Use `--pull=always` with the `latest` tag, or specify a version in a tag +docker run -it --pull=always docker.io/parity/subkey:latest +``` + ### Generate a random account Generating a new key is as simple as running: - subkey generate +```bash +subkey generate +``` The output looks similar to: -``` +```text Secret phrase `hotel forest jar hover kite book view eight stuff angle legend defense` is account: Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 @@ -50,6 +72,7 @@ The **SS58 address** (or **Public Address**) of a new account is a reprensentati You can read more about the SS58 format in the [substrate wiki](https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)) and see the list of reserved prefixes in the [Polkadot wiki](https://wiki.polkadot.network/docs/build-ss58-registry). 
For instance, considering the previous seed `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` the SS58 addresses are: + - Polkadot: `16m4J167Mptt8UXL8aGSAi7U2FnPpPxZHPrCgMG9KJzVoFqM` - Kusama: `JLNozAv8QeLSbLFwe2UvWeKKE4yvmDbfGxTuiYkF2BUMx4M` @@ -58,12 +81,14 @@ For instance, considering the previous seed `0xa05c75731970cc7868a2fb7cb577353cd `subkey` can calso generate the output as *json*. This is useful for automation. command: -``` + +```bash subkey generate --output-type json ``` output: -``` + +```json { "accountId": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", "publicKey": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", @@ -76,12 +101,14 @@ output: So if you only want to get the `secretSeed` for instance, you can use: command: -``` + +```bash subkey generate --output-type json | jq -r .secretSeed ``` output: -``` + +```text 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d ``` @@ -89,10 +116,13 @@ output: `subkey` supports an additional user-defined secret that will be appended to the seed. 
Let's see the following example: - subkey generate --password extra_secret +```bash +subkey generate --password extra_secret +``` output: -``` + +```text Secret phrase `soup lyrics media market way crouch elevator put moon useful question wide` is account: Secret seed: 0xe7cfd179d6537a676cb94bac3b5c5c9cb1550e846ac4541040d077dfbac2e7fd Public key (hex): 0xf6a233c3e1de1a2ae0486100b460b3ce3d7231ddfe9dadabbd35ab968c70905d @@ -102,11 +132,15 @@ Secret phrase `soup lyrics media market way crouch elevator put moon useful ques Using the `inspect` command (see more details below), we see that knowning only the **secret seed** is no longer sufficient to recover the account: - subkey inspect "soup lyrics media market way crouch elevator put moon useful question wide" +```bash +subkey inspect "soup lyrics media market way crouch elevator put moon useful question wide" +``` which recovers the account `5Fe4sqj2K4fRuzEGvToi4KATqZfiDU7TqynjXG6PZE2dxwyh` and not `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC` as we expected. The additional user-defined **password** (`extra_secret` in our example) is now required to fully recover the account. Let's inspect the the previous mnemonic, this time passing also the required `password` as shown below: - subkey inspect --password extra_secret "soup lyrics media market way crouch elevator put moon useful question wide" +```bash +subkey inspect --password extra_secret "soup lyrics media market way crouch elevator put moon useful question wide" +``` This time, we properly recovered `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC`. 
@@ -116,23 +150,29 @@ If you have *some data* about a key, `subkey inpsect` will help you discover mor If you have **secrets** that you would like to verify for instance, you can use: - subkey inspect < mnemonic | seed > +```bash +subkey inspect < mnemonic | seed > +``` If you have only **public data**, you can see a subset of the information: - subkey inspect --public < pubkey | address > +```bash +subkey inspect --public < pubkey | address > +``` **NOTE**: While you will be able to recover the secret seed from the mnemonic, the opposite is not possible. **NOTE**: For obvious reasons, the **secrets** cannot be recovered from passing **public data** such as `pubkey` or `address` as input. command: -``` + +```bash subkey inspect 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d ``` output: -``` + +```text Secret Key URI `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` is account: Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 @@ -144,17 +184,23 @@ Secret Key URI `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c9 `subkey` allows using a **secret key** to sign a random message. 
The signature can then be verified by anyone using your **public key**: - echo -n | subkey sign --suri +```bash +echo -n | subkey sign --suri +``` example: - MESSAGE=hello - SURI=0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d - echo -n $MESSAGE | subkey sign --suri $SURI +```text +MESSAGE=hello +SURI=0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +echo -n $MESSAGE | subkey sign --suri $SURI +``` output: - 9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +```text +9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +``` **NOTE**: Each run of the `sign` command will yield a different output. While each signature is different, they are all valid. @@ -162,34 +208,44 @@ output: Given a message, a signature and an address, `subkey` can verify whether the **message** has been digitally signed by the holder (or one of the holders) of the **private key** for the given **address**: - echo -n | subkey verify
+```bash +echo -n | subkey verify
+``` example: - MESSAGE=hello - URI=0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 - SIGNATURE=9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c - echo -n $MESSAGE | subkey verify $SIGNATURE $URI +```bash +MESSAGE=hello +URI=0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 +SIGNATURE=9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +echo -n $MESSAGE | subkey verify $SIGNATURE $URI +``` output: - Signature verifies correctly. +```text +Signature verifies correctly. +``` A failure looks like: - Error: SignatureInvalid +```text +Error: SignatureInvalid +``` ### Using the vanity generator You can use the included vanity generator to find a seed that provides an address which includes the desired pattern. Be warned, depending on your hardware this may take a while. command: -``` + +```bash subkey vanity --network polkadot --pattern bob ``` output: -``` + +```text Generating key containing pattern 'bob' best: 190 == top: 189 Secret Key URI `0x8c9a73097f235b84021a446bc2826a00c690ea0be3e0d81a84931cb4146d6691` is account: diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 144a3ab6850ff..bbee60ae98dcf 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -344,7 +344,7 @@ where let mut t2 = futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse(); - let pending_iterator = select! { + let mut pending_iterator = select! 
{ res = t1 => res, _ = t2 => { log::warn!( @@ -363,7 +363,7 @@ where let mut transaction_pushed = false; let mut hit_block_size_limit = false; - for pending_tx in pending_iterator { + while let Some(pending_tx) = pending_iterator.next() { if (self.now)() > deadline { debug!( "Consensus deadline reached when pushing block transactions, \ @@ -378,6 +378,7 @@ where let block_size = block_builder.estimate_block_size(self.include_proof_in_block_size_estimation); if block_size + pending_tx_data.encoded_size() > block_size_limit { + pending_iterator.report_invalid(&pending_tx); if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( @@ -400,6 +401,7 @@ where debug!("[{:?}] Pushed to the block.", pending_tx_hash); }, Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { + pending_iterator.report_invalid(&pending_tx); if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( @@ -412,6 +414,7 @@ where } }, Err(e) if skipped > 0 => { + pending_iterator.report_invalid(&pending_tx); trace!( "[{:?}] Ignoring invalid transaction when skipping: {}", pending_tx_hash, @@ -419,6 +422,7 @@ where ); }, Err(e) => { + pending_iterator.report_invalid(&pending_tx); debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); unqueue_invalid.push(pending_tx_hash); }, @@ -718,7 +722,7 @@ mod tests { ); // when - let deadline = time::Duration::from_secs(9); + let deadline = time::Duration::from_secs(900); let block = block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) .map(|r| r.block) diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml new file mode 100644 index 0000000000000..d4541288a6287 --- /dev/null +++ b/client/beefy/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "beefy-gadget" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +fnv = "1.0.6" +futures = "0.3" +log = "0.4" +parking_lot = "0.11" +thiserror = "1.0" 
+wasm-timer = "0.2.5" + +codec = { version = "2.2.0", package = "parity-scale-codec", features = ["derive"] } +prometheus = { version = "0.9.0", package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } + +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } +sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } + +sc-utils = { version = "4.0.0-dev", path = "../utils" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-keystore = { version = "4.0.0-dev", path = "../keystore" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } + +beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } + +[dev-dependencies] +sc-network-test = { version = "0.8.0", path = "../network/test" } + +strum = { version = "0.21", features = ["derive"] } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml new file mode 100644 index 0000000000000..8af2fa3eac867 --- /dev/null +++ b/client/beefy/rpc/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "beefy-gadget-rpc" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +futures = "0.3.16" +log = "0.4" +serde = { version = "1.0.130", features = ["derive"] } + +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +jsonrpc-pubsub = "18.0.0" + +codec = { version = "2.2.0", package = "parity-scale-codec", features = ["derive"] } + 
+sc-rpc = { version = "4.0.0-dev", path = "../../rpc" } + +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } + +beefy-gadget = { version = "4.0.0-dev", path = "../." } +beefy-primitives = { version = "4.0.0-dev", path = "../../../primitives/beefy" } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs new file mode 100644 index 0000000000000..c9a09525569b8 --- /dev/null +++ b/client/beefy/rpc/src/lib.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! RPC API for BEEFY. + +#![warn(missing_docs)] + +use std::sync::Arc; + +use sp_runtime::traits::Block as BlockT; + +use futures::{FutureExt, SinkExt, StreamExt}; +use jsonrpc_derive::rpc; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use log::warn; + +use beefy_gadget::notification::BeefySignedCommitmentStream; + +mod notification; + +/// Provides RPC methods for interacting with BEEFY. +#[rpc] +pub trait BeefyApi { + /// RPC Metadata + type Metadata; + + /// Returns the block most recently finalized by BEEFY, alongside side its justification. 
+ #[pubsub( + subscription = "beefy_justifications", + subscribe, + name = "beefy_subscribeJustifications" + )] + fn subscribe_justifications( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ); + + /// Unsubscribe from receiving notifications about recently finalized blocks. + #[pubsub( + subscription = "beefy_justifications", + unsubscribe, + name = "beefy_unsubscribeJustifications" + )] + fn unsubscribe_justifications( + &self, + metadata: Option, + id: SubscriptionId, + ) -> jsonrpc_core::Result; +} + +/// Implements the BeefyApi RPC trait for interacting with BEEFY. +pub struct BeefyRpcHandler { + signed_commitment_stream: BeefySignedCommitmentStream, + manager: SubscriptionManager, +} + +impl BeefyRpcHandler { + /// Creates a new BeefyRpcHandler instance. + pub fn new(signed_commitment_stream: BeefySignedCommitmentStream, executor: E) -> Self + where + E: futures::task::Spawn + Send + Sync + 'static, + { + let manager = SubscriptionManager::new(Arc::new(executor)); + Self { signed_commitment_stream, manager } + } +} + +impl BeefyApi for BeefyRpcHandler +where + Block: BlockT, +{ + type Metadata = sc_rpc::Metadata; + + fn subscribe_justifications( + &self, + _metadata: Self::Metadata, + subscriber: Subscriber, + ) { + let stream = self + .signed_commitment_stream + .subscribe() + .map(|x| Ok::<_, ()>(Ok(notification::SignedCommitment::new::(x)))); + + self.manager.add(subscriber, |sink| { + stream + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) + .map(|_| ()) + }); + } + + fn unsubscribe_justifications( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> jsonrpc_core::Result { + Ok(self.manager.cancel(id)) + } +} diff --git a/client/beefy/rpc/src/notification.rs b/client/beefy/rpc/src/notification.rs new file mode 100644 index 0000000000000..4830d72905a98 --- /dev/null +++ b/client/beefy/rpc/src/notification.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use codec::Encode; +use serde::{Deserialize, Serialize}; + +use sp_runtime::traits::Block as BlockT; + +/// An encoded signed commitment proving that the given header has been finalized. +/// The given bytes should be the SCALE-encoded representation of a +/// `beefy_primitives::SignedCommitment`. +#[derive(Clone, Serialize, Deserialize)] +pub struct SignedCommitment(sp_core::Bytes); + +impl SignedCommitment { + pub fn new( + signed_commitment: beefy_gadget::notification::SignedCommitment, + ) -> Self + where + Block: BlockT, + { + SignedCommitment(signed_commitment.encode().into()) + } +} diff --git a/client/beefy/src/error.rs b/client/beefy/src/error.rs new file mode 100644 index 0000000000000..db532d34c1e3b --- /dev/null +++ b/client/beefy/src/error.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! BEEFY gadget specific errors +//! +//! Used for BEEFY gadget interal error handling only + +use std::fmt::Debug; + +#[derive(Debug, thiserror::Error, PartialEq)] +pub enum Error { + #[error("Keystore error: {0}")] + Keystore(String), + #[error("Signature error: {0}")] + Signature(String), +} diff --git a/client/beefy/src/gossip.rs b/client/beefy/src/gossip.rs new file mode 100644 index 0000000000000..d0199964b6ebf --- /dev/null +++ b/client/beefy/src/gossip.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{collections::BTreeMap, time::Duration}; + +use sc_network::PeerId; +use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; +use sp_core::hashing::twox_64; +use sp_runtime::traits::{Block, Hash, Header, NumberFor}; + +use codec::{Decode, Encode}; +use log::{debug, trace}; +use parking_lot::{Mutex, RwLock}; +use wasm_timer::Instant; + +use beefy_primitives::{ + crypto::{Public, Signature}, + MmrRootHash, VoteMessage, +}; + +use crate::keystore::BeefyKeystore; + +#[cfg(test)] +#[path = "gossip_tests.rs"] +mod tests; + +// Limit BEEFY gossip by keeping only a bound number of voting rounds alive. +const MAX_LIVE_GOSSIP_ROUNDS: usize = 3; + +// Timeout for rebroadcasting messages. +const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); + +/// Gossip engine messages topic +pub(crate) fn topic() -> B::Hash +where + B: Block, +{ + <::Hashing as Hash>::hash(b"beefy") +} + +/// A type that represents hash of the message. +pub type MessageHash = [u8; 8]; + +type KnownVotes = BTreeMap, fnv::FnvHashSet>; + +/// BEEFY gossip validator +/// +/// Validate BEEFY gossip messages and limit the number of live BEEFY voting rounds. +/// +/// Allows messages from last [`MAX_LIVE_GOSSIP_ROUNDS`] to flow, everything else gets +/// rejected/expired. +/// +///All messaging is handled in a single BEEFY global topic. +pub(crate) struct GossipValidator +where + B: Block, +{ + topic: B::Hash, + known_votes: RwLock>, + next_rebroadcast: Mutex, +} + +impl GossipValidator +where + B: Block, +{ + pub fn new() -> GossipValidator { + GossipValidator { + topic: topic::(), + known_votes: RwLock::new(BTreeMap::new()), + next_rebroadcast: Mutex::new(Instant::now() + REBROADCAST_AFTER), + } + } + + /// Note a voting round. + /// + /// Noting `round` will keep `round` live. + /// + /// We retain the [`MAX_LIVE_GOSSIP_ROUNDS`] most **recent** voting rounds as live. + /// As long as a voting round is live, it will be gossiped to peer nodes. 
+ pub(crate) fn note_round(&self, round: NumberFor) { + debug!(target: "beefy", "🥩 About to note round #{}", round); + + let mut live = self.known_votes.write(); + + if !live.contains_key(&round) { + live.insert(round, Default::default()); + } + + if live.len() > MAX_LIVE_GOSSIP_ROUNDS { + let to_remove = live.iter().next().map(|x| x.0).copied(); + if let Some(first) = to_remove { + live.remove(&first); + } + } + } + + fn add_known(known_votes: &mut KnownVotes, round: &NumberFor, hash: MessageHash) { + known_votes.get_mut(round).map(|known| known.insert(hash)); + } + + // Note that we will always keep the most recent unseen round alive. + // + // This is a preliminary fix and the detailed description why we are + // doing this can be found as part of the issue below + // + // https://github.com/paritytech/grandpa-bridge-gadget/issues/237 + // + fn is_live(known_votes: &KnownVotes, round: &NumberFor) -> bool { + let unseen_round = if let Some(max_known_round) = known_votes.keys().last() { + round > max_known_round + } else { + known_votes.is_empty() + }; + + known_votes.contains_key(round) || unseen_round + } + + fn is_known(known_votes: &KnownVotes, round: &NumberFor, hash: &MessageHash) -> bool { + known_votes.get(round).map(|known| known.contains(hash)).unwrap_or(false) + } +} + +impl Validator for GossipValidator +where + B: Block, +{ + fn validate( + &self, + _context: &mut dyn ValidatorContext, + sender: &PeerId, + mut data: &[u8], + ) -> ValidationResult { + if let Ok(msg) = + VoteMessage::, Public, Signature>::decode(&mut data) + { + let msg_hash = twox_64(data); + let round = msg.commitment.block_number; + + // Verify general usefulness of the message. + // We are going to discard old votes right away (without verification) + // Also we keep track of already received votes to avoid verifying duplicates. 
+ { + let known_votes = self.known_votes.read(); + + if !GossipValidator::::is_live(&known_votes, &round) { + return ValidationResult::Discard + } + + if GossipValidator::::is_known(&known_votes, &round, &msg_hash) { + return ValidationResult::ProcessAndKeep(self.topic) + } + } + + if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { + GossipValidator::::add_known(&mut *self.known_votes.write(), &round, msg_hash); + return ValidationResult::ProcessAndKeep(self.topic) + } else { + // TODO: report peer + debug!(target: "beefy", "🥩 Bad signature on message: {:?}, from: {:?}", msg, sender); + } + } + + ValidationResult::Discard + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + let known_votes = self.known_votes.read(); + Box::new(move |_topic, mut data| { + let msg = match VoteMessage::, Public, Signature>::decode( + &mut data, + ) { + Ok(vote) => vote, + Err(_) => return true, + }; + + let round = msg.commitment.block_number; + let expired = !GossipValidator::::is_live(&known_votes, &round); + + trace!(target: "beefy", "🥩 Message for round #{} expired: {}", round, expired); + + expired + }) + } + + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { + let do_rebroadcast = { + let now = Instant::now(); + let mut next_rebroadcast = self.next_rebroadcast.lock(); + if now >= *next_rebroadcast { + *next_rebroadcast = now + REBROADCAST_AFTER; + true + } else { + false + } + }; + + let known_votes = self.known_votes.read(); + Box::new(move |_who, intent, _topic, mut data| { + if let MessageIntent::PeriodicRebroadcast = intent { + return do_rebroadcast + } + + let msg = match VoteMessage::, Public, Signature>::decode( + &mut data, + ) { + Ok(vote) => vote, + Err(_) => return true, + }; + + let round = msg.commitment.block_number; + let allowed = GossipValidator::::is_live(&known_votes, &round); + + debug!(target: "beefy", "🥩 Message for round #{} allowed: {}", round, allowed); + + allowed + }) + } +} diff --git 
a/client/beefy/src/gossip_tests.rs b/client/beefy/src/gossip_tests.rs new file mode 100644 index 0000000000000..2d46b873cb7b0 --- /dev/null +++ b/client/beefy/src/gossip_tests.rs @@ -0,0 +1,182 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sc_keystore::LocalKeystore; +use sc_network_test::Block; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + +use beefy_primitives::{crypto::Signature, Commitment, MmrRootHash, VoteMessage, KEY_TYPE}; + +use crate::keystore::{tests::Keyring, BeefyKeystore}; + +use super::*; + +#[test] +fn note_round_works() { + let gv = GossipValidator::::new(); + + gv.note_round(1u64); + + let live = gv.known_votes.read(); + assert!(GossipValidator::::is_live(&live, &1u64)); + + drop(live); + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(!GossipValidator::::is_live(&live, &1u64)); + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(GossipValidator::::is_live(&live, &7u64)); + assert!(GossipValidator::::is_live(&live, &10u64)); +} + +#[test] +fn keeps_most_recent_max_rounds() { + let gv = GossipValidator::::new(); + + gv.note_round(3u64); + gv.note_round(7u64); 
+ gv.note_round(10u64); + gv.note_round(1u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(!GossipValidator::::is_live(&live, &1u64)); + + drop(live); + + gv.note_round(23u64); + gv.note_round(15u64); + gv.note_round(20u64); + gv.note_round(2u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &15u64)); + assert!(GossipValidator::::is_live(&live, &20u64)); + assert!(GossipValidator::::is_live(&live, &23u64)); +} + +#[test] +fn note_same_round_twice() { + let gv = GossipValidator::::new(); + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + drop(live); + + // note round #7 again -> should not change anything + gv.note_round(7u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(GossipValidator::::is_live(&live, &7u64)); + assert!(GossipValidator::::is_live(&live, &10u64)); +} + +struct TestContext; +impl ValidatorContext for TestContext { + fn broadcast_topic(&mut self, _topic: B::Hash, _force: bool) { + todo!() + } + + fn broadcast_message(&mut self, _topic: B::Hash, _message: Vec, _force: bool) { + todo!() + } + + fn send_message(&mut self, _who: &sc_network::PeerId, _message: Vec) { + todo!() + } + + fn send_topic(&mut self, _who: &sc_network::PeerId, _topic: B::Hash, _force: bool) { + todo!() + } +} + +fn sign_commitment( + who: &Keyring, + commitment: &Commitment, +) -> Signature { + let store: SyncCryptoStorePtr = std::sync::Arc::new(LocalKeystore::in_memory()); + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&who.to_seed())).unwrap(); + let beefy_keystore: BeefyKeystore = Some(store).into(); + + beefy_keystore.sign(&who.public(), 
&commitment.encode()).unwrap() +} + +#[test] +fn should_avoid_verifying_signatures_twice() { + let gv = GossipValidator::::new(); + let sender = sc_network::PeerId::random(); + let mut context = TestContext; + + let commitment = + Commitment { payload: MmrRootHash::default(), block_number: 3_u64, validator_set_id: 0 }; + + let signature = sign_commitment(&Keyring::Alice, &commitment); + + let vote = VoteMessage { commitment, id: Keyring::Alice.public(), signature }; + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + // first time the cache should be populated. + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + assert_eq!(gv.known_votes.read().get(&vote.commitment.block_number).map(|x| x.len()), Some(1)); + + // second time we should hit the cache + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + + // next we should quickly reject if the round is not live. + gv.note_round(11_u64); + gv.note_round(12_u64); + + assert!(!GossipValidator::::is_live( + &*gv.known_votes.read(), + &vote.commitment.block_number + )); + + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::Discard)); +} diff --git a/client/beefy/src/keystore.rs b/client/beefy/src/keystore.rs new file mode 100644 index 0000000000000..88618b8a5a140 --- /dev/null +++ b/client/beefy/src/keystore.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::convert::{From, TryInto}; + +use sp_application_crypto::RuntimeAppPublic; +use sp_core::keccak_256; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + +use log::warn; + +use beefy_primitives::{ + crypto::{Public, Signature}, + KEY_TYPE, +}; + +use crate::error; + +#[cfg(test)] +#[path = "keystore_tests.rs"] +pub mod tests; + +/// A BEEFY specific keystore implemented as a `Newtype`. This is basically a +/// wrapper around [`sp_keystore::SyncCryptoStore`] and allows to customize +/// common cryptographic functionality. +pub(crate) struct BeefyKeystore(Option); + +impl BeefyKeystore { + /// Check if the keystore contains a private key for one of the public keys + /// contained in `keys`. A public key with a matching private key is known + /// as a local authority id. + /// + /// Return the public key for which we also do have a private key. If no + /// matching private key is found, `None` will be returned. + pub fn authority_id(&self, keys: &[Public]) -> Option { + let store = self.0.clone()?; + + // we do check for multiple private keys as a key store sanity check. + let public: Vec = keys + .iter() + .filter(|k| SyncCryptoStore::has_keys(&*store, &[(k.to_raw_vec(), KEY_TYPE)])) + .cloned() + .collect(); + + if public.len() > 1 { + warn!(target: "beefy", "🥩 Multiple private keys found for: {:?} ({})", public, public.len()); + } + + public.get(0).cloned() + } + + /// Sign `message` with the `public` key. + /// + /// Note that `message` usually will be pre-hashed before being signed. + /// + /// Return the message signature or an error in case of failure. 
+ pub fn sign(&self, public: &Public, message: &[u8]) -> Result { + let store = self.0.clone().ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; + + let msg = keccak_256(message); + let public = public.as_ref(); + + let sig = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, public, &msg) + .map_err(|e| error::Error::Keystore(e.to_string()))? + .ok_or_else(|| error::Error::Signature("ecdsa_sign_prehashed() failed".to_string()))?; + + // check that `sig` has the expected result type + let sig = sig.clone().try_into().map_err(|_| { + error::Error::Signature(format!("invalid signature {:?} for key {:?}", sig, public)) + })?; + + Ok(sig) + } + + /// Returns a vector of [`beefy_primitives::crypto::Public`] keys which are currently supported + /// (i.e. found in the keystore). + pub fn public_keys(&self) -> Result, error::Error> { + let store = self.0.clone().ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; + + let pk: Vec = SyncCryptoStore::ecdsa_public_keys(&*store, KEY_TYPE) + .iter() + .map(|k| Public::from(k.clone())) + .collect(); + + Ok(pk) + } + + /// Use the `public` key to verify that `sig` is a valid signature for `message`. + /// + /// Return `true` if the signature is authentic, `false` otherwise. + pub fn verify(public: &Public, sig: &Signature, message: &[u8]) -> bool { + let msg = keccak_256(message); + let sig = sig.as_ref(); + let public = public.as_ref(); + + sp_core::ecdsa::Pair::verify_prehashed(sig, &msg, public) + } +} + +impl From> for BeefyKeystore { + fn from(store: Option) -> BeefyKeystore { + BeefyKeystore(store) + } +} diff --git a/client/beefy/src/keystore_tests.rs b/client/beefy/src/keystore_tests.rs new file mode 100644 index 0000000000000..99e3e42228df2 --- /dev/null +++ b/client/beefy/src/keystore_tests.rs @@ -0,0 +1,275 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; + +use sc_keystore::LocalKeystore; +use sp_core::{ecdsa, keccak_256, Pair}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + +use beefy_primitives::{crypto, KEY_TYPE}; + +use super::BeefyKeystore; +use crate::error::Error; + +/// Set of test accounts using [`beefy_primitives::crypto`] types. +#[allow(missing_docs)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, strum::Display, strum::EnumIter)] +pub(crate) enum Keyring { + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, +} + +impl Keyring { + /// Sign `msg`. + pub fn sign(self, msg: &[u8]) -> crypto::Signature { + let msg = keccak_256(msg); + ecdsa::Pair::from(self).sign_prehashed(&msg).into() + } + + /// Return key pair. + pub fn pair(self) -> crypto::Pair { + ecdsa::Pair::from_string(self.to_seed().as_str(), None).unwrap().into() + } + + /// Return public key. + pub fn public(self) -> crypto::Public { + self.pair().public() + } + + /// Return seed string. 
+ pub fn to_seed(self) -> String { + format!("//{}", self) + } +} + +impl From for crypto::Pair { + fn from(k: Keyring) -> Self { + k.pair() + } +} + +impl From for ecdsa::Pair { + fn from(k: Keyring) -> Self { + k.pair().into() + } +} + +fn keystore() -> SyncCryptoStorePtr { + Arc::new(LocalKeystore::in_memory()) +} + +#[test] +fn verify_should_work() { + let msg = keccak_256(b"I am Alice!"); + let sig = Keyring::Alice.sign(b"I am Alice!"); + + assert!(ecdsa::Pair::verify_prehashed( + &sig.clone().into(), + &msg, + &Keyring::Alice.public().into(), + )); + + // different public key -> fail + assert!(!ecdsa::Pair::verify_prehashed( + &sig.clone().into(), + &msg, + &Keyring::Bob.public().into(), + )); + + let msg = keccak_256(b"I am not Alice!"); + + // different msg -> fail + assert!(!ecdsa::Pair::verify_prehashed(&sig.into(), &msg, &Keyring::Alice.public().into(),)); +} + +#[test] +fn pair_works() { + let want = crypto::Pair::from_string("//Alice", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Alice.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Bob", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Bob.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Charlie", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Charlie.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Dave", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Dave.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Eve", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Eve.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Ferdie", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Ferdie.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//One", None).expect("Pair 
failed").to_raw_vec(); + let got = Keyring::One.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Two", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Two.pair().to_raw_vec(); + assert_eq!(want, got); +} + +#[test] +fn authority_id_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let bob = Keyring::Bob.public(); + let charlie = Keyring::Charlie.public(); + + let store: BeefyKeystore = Some(store).into(); + + let mut keys = vec![bob, charlie]; + + let id = store.authority_id(keys.as_slice()); + assert!(id.is_none()); + + keys.push(alice.clone()); + + let id = store.authority_id(keys.as_slice()).unwrap(); + assert_eq!(id, alice); +} + +#[test] +fn sign_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let store: BeefyKeystore = Some(store).into(); + + let msg = b"are you involved or commited?"; + + let sig1 = store.sign(&alice, msg).unwrap(); + let sig2 = Keyring::Alice.sign(msg); + + assert_eq!(sig1, sig2); +} + +#[test] +fn sign_error() { + let store = keystore(); + + let _ = SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Bob.to_seed())) + .ok() + .unwrap(); + + let store: BeefyKeystore = Some(store).into(); + + let alice = Keyring::Alice.public(); + + let msg = b"are you involved or commited?"; + let sig = store.sign(&alice, msg).err().unwrap(); + let err = Error::Signature("ecdsa_sign_prehashed() failed".to_string()); + + assert_eq!(sig, err); +} + +#[test] +fn sign_no_keystore() { + let store: BeefyKeystore = None.into(); + + let alice = Keyring::Alice.public(); + let msg = b"are you involved or commited"; + + let sig = store.sign(&alice, msg).err().unwrap(); + let err = Error::Keystore("no 
Keystore".to_string()); + assert_eq!(sig, err); +} + +#[test] +fn verify_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let store: BeefyKeystore = Some(store).into(); + + // `msg` and `sig` match + let msg = b"are you involved or commited?"; + let sig = store.sign(&alice, msg).unwrap(); + assert!(BeefyKeystore::verify(&alice, &sig, msg)); + + // `msg and `sig` don't match + let msg = b"you are just involved"; + assert!(!BeefyKeystore::verify(&alice, &sig, msg)); +} + +// Note that we use keys with and without a seed for this test. +#[test] +fn public_keys_works() { + const TEST_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"test"); + + let store = keystore(); + + let add_key = |key_type, seed: Option<&str>| { + SyncCryptoStore::ecdsa_generate_new(&*store, key_type, seed).unwrap() + }; + + // test keys + let _ = add_key(TEST_TYPE, Some(Keyring::Alice.to_seed().as_str())); + let _ = add_key(TEST_TYPE, Some(Keyring::Bob.to_seed().as_str())); + + let _ = add_key(TEST_TYPE, None); + let _ = add_key(TEST_TYPE, None); + + // BEEFY keys + let _ = add_key(KEY_TYPE, Some(Keyring::Dave.to_seed().as_str())); + let _ = add_key(KEY_TYPE, Some(Keyring::Eve.to_seed().as_str())); + + let key1: crypto::Public = add_key(KEY_TYPE, None).into(); + let key2: crypto::Public = add_key(KEY_TYPE, None).into(); + + let store: BeefyKeystore = Some(store).into(); + + let keys = store.public_keys().ok().unwrap(); + + assert!(keys.len() == 4); + assert!(keys.contains(&Keyring::Dave.public())); + assert!(keys.contains(&Keyring::Eve.public())); + assert!(keys.contains(&key1)); + assert!(keys.contains(&key2)); +} diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs new file mode 100644 index 0000000000000..b2372b2a6c518 --- /dev/null +++ b/client/beefy/src/lib.rs @@ -0,0 +1,159 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; + +use log::debug; +use prometheus::Registry; + +use sc_client_api::{Backend, BlockchainEvents, Finalizer}; +use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; + +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::traits::Block; + +use beefy_primitives::BeefyApi; + +mod error; +mod gossip; +mod keystore; +mod metrics; +mod round; +mod worker; + +pub mod notification; + +pub const BEEFY_PROTOCOL_NAME: &str = "/paritytech/beefy/1"; + +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +pub fn beefy_peers_set_config() -> sc_network::config::NonDefaultSetConfig { + let mut cfg = + sc_network::config::NonDefaultSetConfig::new(BEEFY_PROTOCOL_NAME.into(), 1024 * 1024); + cfg.allow_non_reserved(25, 25); + cfg +} + +/// A convenience BEEFY client trait that defines all the type bounds a BEEFY client +/// has to satisfy. Ideally that should actually be a trait alias. Unfortunately as +/// of today, Rust does not allow a type alias to be used as a trait bound. Tracking +/// issue is . 
+pub trait Client: + BlockchainEvents + HeaderBackend + Finalizer + ProvideRuntimeApi + Send + Sync +where + B: Block, + BE: Backend, +{ + // empty +} + +impl Client for T +where + B: Block, + BE: Backend, + T: BlockchainEvents + + HeaderBackend + + Finalizer + + ProvideRuntimeApi + + Send + + Sync, +{ + // empty +} + +/// BEEFY gadget initialization parameters. +pub struct BeefyParams +where + B: Block, + BE: Backend, + C: Client, + C::Api: BeefyApi, + N: GossipNetwork + Clone + Send + 'static, +{ + /// BEEFY client + pub client: Arc, + /// Client Backend + pub backend: Arc, + /// Local key store + pub key_store: Option, + /// Gossip network + pub network: N, + /// BEEFY signed commitment sender + pub signed_commitment_sender: notification::BeefySignedCommitmentSender, + /// Minimal delta between blocks, BEEFY should vote for + pub min_block_delta: u32, + /// Prometheus metric registry + pub prometheus_registry: Option, +} + +/// Start the BEEFY gadget. +/// +/// This is a thin shim around running and awaiting a BEEFY worker. 
+pub async fn start_beefy_gadget(beefy_params: BeefyParams) +where + B: Block, + BE: Backend, + C: Client, + C::Api: BeefyApi, + N: GossipNetwork + Clone + Send + 'static, +{ + let BeefyParams { + client, + backend, + key_store, + network, + signed_commitment_sender, + min_block_delta, + prometheus_registry, + } = beefy_params; + + let gossip_validator = Arc::new(gossip::GossipValidator::new()); + let gossip_engine = + GossipEngine::new(network, BEEFY_PROTOCOL_NAME, gossip_validator.clone(), None); + + let metrics = + prometheus_registry.as_ref().map(metrics::Metrics::register).and_then( + |result| match result { + Ok(metrics) => { + debug!(target: "beefy", "🥩 Registered metrics"); + Some(metrics) + }, + Err(err) => { + debug!(target: "beefy", "🥩 Failed to register metrics: {:?}", err); + None + }, + }, + ); + + let worker_params = worker::WorkerParams { + client, + backend, + key_store: key_store.into(), + signed_commitment_sender, + gossip_engine, + gossip_validator, + min_block_delta, + metrics, + }; + + let worker = worker::BeefyWorker::<_, _, _>::new(worker_params); + + worker.run().await +} diff --git a/client/beefy/src/metrics.rs b/client/beefy/src/metrics.rs new file mode 100644 index 0000000000000..0fdc29f97c37a --- /dev/null +++ b/client/beefy/src/metrics.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! BEEFY Prometheus metrics definition + +use prometheus::{register, Counter, Gauge, PrometheusError, Registry, U64}; + +/// BEEFY metrics exposed through Prometheus +pub(crate) struct Metrics { + /// Current active validator set id + pub beefy_validator_set_id: Gauge, + /// Total number of votes sent by this node + pub beefy_votes_sent: Counter, + /// Most recent concluded voting round + pub beefy_round_concluded: Gauge, + /// Best block finalized by BEEFY + pub beefy_best_block: Gauge, + /// Next block BEEFY should vote on + pub beefy_should_vote_on: Gauge, + /// Number of sessions without a signed commitment + pub beefy_skipped_sessions: Counter, +} + +impl Metrics { + pub(crate) fn register(registry: &Registry) -> Result { + Ok(Self { + beefy_validator_set_id: register( + Gauge::new("beefy_validator_set_id", "Current BEEFY active validator set id.")?, + registry, + )?, + beefy_votes_sent: register( + Counter::new("beefy_votes_sent", "Number of votes sent by this node")?, + registry, + )?, + beefy_round_concluded: register( + Gauge::new("beefy_round_concluded", "Voting round, that has been concluded")?, + registry, + )?, + beefy_best_block: register( + Gauge::new("beefy_best_block", "Best block finalized by BEEFY")?, + registry, + )?, + beefy_should_vote_on: register( + Gauge::new("beefy_should_vote_on", "Next block, BEEFY should vote on")?, + registry, + )?, + beefy_skipped_sessions: register( + Counter::new( + "beefy_skipped_sessions", + "Number of sessions without a signed commitment", + )?, + registry, + )?, + }) + } +} + +// Note: we use the `format` macro to convert an expr into a `u64`. This will fail, +// if expr does not derive `Display`. +#[macro_export] +macro_rules! 
metric_set { + ($self:ident, $m:ident, $v:expr) => {{ + let val: u64 = format!("{}", $v).parse().unwrap(); + + if let Some(metrics) = $self.metrics.as_ref() { + metrics.$m.set(val); + } + }}; +} + +#[macro_export] +macro_rules! metric_inc { + ($self:ident, $m:ident) => {{ + if let Some(metrics) = $self.metrics.as_ref() { + metrics.$m.inc(); + } + }}; +} diff --git a/client/beefy/src/notification.rs b/client/beefy/src/notification.rs new file mode 100644 index 0000000000000..6099c9681447b --- /dev/null +++ b/client/beefy/src/notification.rs @@ -0,0 +1,113 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; + +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_runtime::traits::{Block, NumberFor}; + +use parking_lot::Mutex; + +/// Stream of signed commitments returned when subscribing. +pub type SignedCommitment = + beefy_primitives::SignedCommitment, beefy_primitives::MmrRootHash>; + +/// Stream of signed commitments returned when subscribing. +type SignedCommitmentStream = TracingUnboundedReceiver>; + +/// Sending endpoint for notifying about signed commitments. 
+type SignedCommitmentSender = TracingUnboundedSender>; + +/// Collection of channel sending endpoints shared with the receiver side so they can register +/// themselves. +type SharedSignedCommitmentSenders = Arc>>>; + +/// The sending half of the signed commitment channel(s). +/// +/// Used to send notifications about signed commitments generated at the end of a BEEFY round. +#[derive(Clone)] +pub struct BeefySignedCommitmentSender +where + B: Block, +{ + subscribers: SharedSignedCommitmentSenders, +} + +impl BeefySignedCommitmentSender +where + B: Block, +{ + /// The `subscribers` should be shared with a corresponding `SignedCommitmentSender`. + fn new(subscribers: SharedSignedCommitmentSenders) -> Self { + Self { subscribers } + } + + /// Send out a notification to all subscribers that a new signed commitment is available for a + /// block. + pub fn notify(&self, signed_commitment: SignedCommitment) { + let mut subscribers = self.subscribers.lock(); + + // do an initial prune on closed subscriptions + subscribers.retain(|n| !n.is_closed()); + + if !subscribers.is_empty() { + subscribers.retain(|n| n.unbounded_send(signed_commitment.clone()).is_ok()); + } + } +} + +/// The receiving half of the signed commitments channel. +/// +/// Used to receive notifications about signed commitments generated at the end of a BEEFY round. +/// The `BeefySignedCommitmentStream` entity stores the `SharedSignedCommitmentSenders` so it can be +/// used to add more subscriptions. +#[derive(Clone)] +pub struct BeefySignedCommitmentStream +where + B: Block, +{ + subscribers: SharedSignedCommitmentSenders, +} + +impl BeefySignedCommitmentStream +where + B: Block, +{ + /// Creates a new pair of receiver and sender of signed commitment notifications. 
+ pub fn channel() -> (BeefySignedCommitmentSender, Self) { + let subscribers = Arc::new(Mutex::new(vec![])); + let receiver = BeefySignedCommitmentStream::new(subscribers.clone()); + let sender = BeefySignedCommitmentSender::new(subscribers); + (sender, receiver) + } + + /// Create a new receiver of signed commitment notifications. + /// + /// The `subscribers` should be shared with a corresponding `BeefySignedCommitmentSender`. + fn new(subscribers: SharedSignedCommitmentSenders) -> Self { + Self { subscribers } + } + + /// Subscribe to a channel through which signed commitments are sent at the end of each BEEFY + /// voting round. + pub fn subscribe(&self) -> SignedCommitmentStream { + let (sender, receiver) = tracing_unbounded("mpsc_signed_commitments_notification_stream"); + self.subscribers.lock().push(sender); + receiver + } +} diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs new file mode 100644 index 0000000000000..7d443603b364e --- /dev/null +++ b/client/beefy/src/round.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{collections::BTreeMap, hash::Hash}; + +use log::{debug, trace}; + +use beefy_primitives::{ + crypto::{Public, Signature}, + ValidatorSet, ValidatorSetId, +}; +use sp_arithmetic::traits::AtLeast32BitUnsigned; +use sp_runtime::traits::MaybeDisplay; + +#[derive(Default)] +struct RoundTracker { + votes: Vec<(Public, Signature)>, +} + +impl RoundTracker { + fn add_vote(&mut self, vote: (Public, Signature)) -> bool { + // this needs to handle equivocations in the future + if self.votes.contains(&vote) { + return false + } + + self.votes.push(vote); + true + } + + fn is_done(&self, threshold: usize) -> bool { + self.votes.len() >= threshold + } +} + +fn threshold(authorities: usize) -> usize { + let faulty = authorities.saturating_sub(1) / 3; + authorities - faulty +} + +pub(crate) struct Rounds { + rounds: BTreeMap<(Hash, Number), RoundTracker>, + validator_set: ValidatorSet, +} + +impl Rounds +where + H: Ord + Hash, + N: Ord + AtLeast32BitUnsigned + MaybeDisplay, +{ + pub(crate) fn new(validator_set: ValidatorSet) -> Self { + Rounds { rounds: BTreeMap::new(), validator_set } + } +} + +impl Rounds +where + H: Ord + Hash, + N: Ord + AtLeast32BitUnsigned + MaybeDisplay, +{ + pub(crate) fn validator_set_id(&self) -> ValidatorSetId { + self.validator_set.id + } + + pub(crate) fn validators(&self) -> Vec { + self.validator_set.validators.clone() + } + + pub(crate) fn add_vote(&mut self, round: (H, N), vote: (Public, Signature)) -> bool { + self.rounds.entry(round).or_default().add_vote(vote) + } + + pub(crate) fn is_done(&self, round: &(H, N)) -> bool { + let done = self + .rounds + .get(round) + .map(|tracker| tracker.is_done(threshold(self.validator_set.validators.len()))) + .unwrap_or(false); + + debug!(target: "beefy", "🥩 Round #{} done: {}", round.1, done); + + done + } + + pub(crate) fn drop(&mut self, round: &(H, N)) -> Option>> { + trace!(target: "beefy", "🥩 About to drop round #{}", round.1); + + let signatures = self.rounds.remove(round)?.votes; + + 
Some( + self.validator_set + .validators + .iter() + .map(|authority_id| { + signatures.iter().find_map(|(id, sig)| { + if id == authority_id { + Some(sig.clone()) + } else { + None + } + }) + }) + .collect(), + ) + } +} diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs new file mode 100644 index 0000000000000..3f52686930332 --- /dev/null +++ b/client/beefy/src/worker.rs @@ -0,0 +1,534 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{collections::BTreeSet, fmt::Debug, marker::PhantomData, sync::Arc}; + +use codec::{Codec, Decode, Encode}; +use futures::{future, FutureExt, StreamExt}; +use log::{debug, error, info, trace, warn}; +use parking_lot::Mutex; + +use sc_client_api::{Backend, FinalityNotification, FinalityNotifications}; +use sc_network_gossip::GossipEngine; + +use sp_api::BlockId; +use sp_arithmetic::traits::AtLeast32Bit; +use sp_runtime::{ + generic::OpaqueDigestItemId, + traits::{Block, Header, NumberFor}, + SaturatedConversion, +}; + +use beefy_primitives::{ + crypto::{AuthorityId, Public, Signature}, + BeefyApi, Commitment, ConsensusLog, MmrRootHash, SignedCommitment, ValidatorSet, + VersionedCommitment, VoteMessage, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, +}; + +use crate::{ + error, + gossip::{topic, GossipValidator}, + keystore::BeefyKeystore, + metric_inc, metric_set, + metrics::Metrics, + notification, round, Client, +}; + +pub(crate) struct WorkerParams +where + B: Block, +{ + pub client: Arc, + pub backend: Arc, + pub key_store: BeefyKeystore, + pub signed_commitment_sender: notification::BeefySignedCommitmentSender, + pub gossip_engine: GossipEngine, + pub gossip_validator: Arc>, + pub min_block_delta: u32, + pub metrics: Option, +} + +/// A BEEFY worker plays the BEEFY protocol +pub(crate) struct BeefyWorker +where + B: Block, + BE: Backend, + C: Client, +{ + client: Arc, + backend: Arc, + key_store: BeefyKeystore, + signed_commitment_sender: notification::BeefySignedCommitmentSender, + gossip_engine: Arc>>, + gossip_validator: Arc>, + /// Min delta in block numbers between two blocks, BEEFY should vote on + min_block_delta: u32, + metrics: Option, + rounds: round::Rounds>, + finality_notifications: FinalityNotifications, + /// Best block we received a GRANDPA notification for + best_grandpa_block: NumberFor, + /// Best block a BEEFY voting round has been concluded for + best_beefy_block: Option>, + /// Validator set id for the last signed commitment + 
last_signed_id: u64, + // keep rustc happy + _backend: PhantomData, +} + +impl BeefyWorker +where + B: Block + Codec, + BE: Backend, + C: Client, + C::Api: BeefyApi, +{ + /// Return a new BEEFY worker instance. + /// + /// Note that a BEEFY worker is only fully functional if a corresponding + /// BEEFY pallet has been deployed on-chain. + /// + /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. + pub(crate) fn new(worker_params: WorkerParams) -> Self { + let WorkerParams { + client, + backend, + key_store, + signed_commitment_sender, + gossip_engine, + gossip_validator, + min_block_delta, + metrics, + } = worker_params; + + BeefyWorker { + client: client.clone(), + backend, + key_store, + signed_commitment_sender, + gossip_engine: Arc::new(Mutex::new(gossip_engine)), + gossip_validator, + min_block_delta, + metrics, + rounds: round::Rounds::new(ValidatorSet::empty()), + finality_notifications: client.finality_notification_stream(), + best_grandpa_block: client.info().finalized_number, + best_beefy_block: None, + last_signed_id: 0, + _backend: PhantomData, + } + } +} + +impl BeefyWorker +where + B: Block, + BE: Backend, + C: Client, + C::Api: BeefyApi, +{ + /// Return `true`, if we should vote on block `number` + fn should_vote_on(&self, number: NumberFor) -> bool { + let best_beefy_block = if let Some(block) = self.best_beefy_block { + block + } else { + debug!(target: "beefy", "🥩 Missing best BEEFY block - won't vote for: {:?}", number); + return false + }; + + let target = vote_target(self.best_grandpa_block, best_beefy_block, self.min_block_delta); + + trace!(target: "beefy", "🥩 should_vote_on: #{:?}, next_block_to_vote_on: #{:?}", number, target); + + metric_set!(self, beefy_should_vote_on, target); + + number == target + } + + /// Return the current active validator set at header `header`. + /// + /// Note that the validator set could be `None`. 
This is the case if we don't find + /// a BEEFY authority set change and we can't fetch the authority set from the + /// BEEFY on-chain state. + /// + /// Such a failure is usually an indication that the BEEFY pallet has not been deployed (yet). + fn validator_set(&self, header: &B::Header) -> Option> { + let new = if let Some(new) = find_authorities_change::(header) { + Some(new) + } else { + let at = BlockId::hash(header.hash()); + self.client.runtime_api().validator_set(&at).ok() + }; + + trace!(target: "beefy", "🥩 active validator set: {:?}", new); + + new + } + + /// Verify `active` validator set for `block` against the key store + /// + /// The critical case is, if we do have a public key in the key store which is not + /// part of the active validator set. + /// + /// Note that for a non-authority node there will be no keystore, and we will + /// return an error and don't check. The error can usually be ignored. + fn verify_validator_set( + &self, + block: &NumberFor, + mut active: ValidatorSet, + ) -> Result<(), error::Error> { + let active: BTreeSet = active.validators.drain(..).collect(); + + let store: BTreeSet = self.key_store.public_keys()?.drain(..).collect(); + + let missing: Vec<_> = store.difference(&active).cloned().collect(); + + if !missing.is_empty() { + debug!(target: "beefy", "🥩 for block {:?} public key missing in validator set: {:?}", block, missing); + } + + Ok(()) + } + + fn handle_finality_notification(&mut self, notification: FinalityNotification) { + trace!(target: "beefy", "🥩 Finality notification: {:?}", notification); + + // update best GRANDPA finalized block we have seen + self.best_grandpa_block = *notification.header.number(); + + if let Some(active) = self.validator_set(¬ification.header) { + // Authority set change or genesis set id triggers new voting rounds + // + // TODO: (adoerr) Enacting a new authority set will also implicitly 'conclude' + // the currently active BEEFY voting round by starting a new one. 
This is + // temporary and needs to be replaced by proper round life cycle handling. + if active.id != self.rounds.validator_set_id() || + (active.id == GENESIS_AUTHORITY_SET_ID && self.best_beefy_block.is_none()) + { + debug!(target: "beefy", "🥩 New active validator set id: {:?}", active); + metric_set!(self, beefy_validator_set_id, active.id); + + // BEEFY should produce a signed commitment for each session + if active.id != self.last_signed_id + 1 && active.id != GENESIS_AUTHORITY_SET_ID { + metric_inc!(self, beefy_skipped_sessions); + } + + // verify the new validator set + let _ = self.verify_validator_set(notification.header.number(), active.clone()); + + self.rounds = round::Rounds::new(active.clone()); + + debug!(target: "beefy", "🥩 New Rounds for id: {:?}", active.id); + + self.best_beefy_block = Some(*notification.header.number()); + + // this metric is kind of 'fake'. Best BEEFY block should only be updated once we + // have a signed commitment for the block. Remove once the above TODO is done. 
+ metric_set!(self, beefy_best_block, *notification.header.number()); + } + } + + if self.should_vote_on(*notification.header.number()) { + let authority_id = if let Some(id) = + self.key_store.authority_id(self.rounds.validators().as_slice()) + { + debug!(target: "beefy", "🥩 Local authority id: {:?}", id); + id + } else { + debug!(target: "beefy", "🥩 Missing validator id - can't vote for: {:?}", notification.header.hash()); + return + }; + + let mmr_root = + if let Some(hash) = find_mmr_root_digest::(¬ification.header) { + hash + } else { + warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", notification.header.hash()); + return + }; + + let commitment = Commitment { + payload: mmr_root, + block_number: notification.header.number(), + validator_set_id: self.rounds.validator_set_id(), + }; + let encoded_commitment = commitment.encode(); + + let signature = match self.key_store.sign(&authority_id, &*encoded_commitment) { + Ok(sig) => sig, + Err(err) => { + warn!(target: "beefy", "🥩 Error signing commitment: {:?}", err); + return + }, + }; + + trace!( + target: "beefy", + "🥩 Produced signature using {:?}, is_valid: {:?}", + authority_id, + BeefyKeystore::verify(&authority_id, &signature, &*encoded_commitment) + ); + + let message = VoteMessage { commitment, id: authority_id, signature }; + + let encoded_message = message.encode(); + + metric_inc!(self, beefy_votes_sent); + + debug!(target: "beefy", "🥩 Sent vote message: {:?}", message); + + self.handle_vote( + (message.commitment.payload, *message.commitment.block_number), + (message.id, message.signature), + ); + + self.gossip_engine.lock().gossip_message(topic::(), encoded_message, false); + } + } + + fn handle_vote(&mut self, round: (MmrRootHash, NumberFor), vote: (Public, Signature)) { + self.gossip_validator.note_round(round.1); + + let vote_added = self.rounds.add_vote(round, vote); + + if vote_added && self.rounds.is_done(&round) { + if let Some(signatures) = self.rounds.drop(&round) { + // id is 
stored for skipped session metric calculation + self.last_signed_id = self.rounds.validator_set_id(); + + let commitment = Commitment { + payload: round.0, + block_number: round.1, + validator_set_id: self.last_signed_id, + }; + + let signed_commitment = SignedCommitment { commitment, signatures }; + + metric_set!(self, beefy_round_concluded, round.1); + + info!(target: "beefy", "🥩 Round #{} concluded, committed: {:?}.", round.1, signed_commitment); + + if self + .backend + .append_justification( + BlockId::Number(round.1), + ( + BEEFY_ENGINE_ID, + VersionedCommitment::V1(signed_commitment.clone()).encode(), + ), + ) + .is_err() + { + // just a trace, because until the round lifecycle is improved, we will + // conclude certain rounds multiple times. + trace!(target: "beefy", "🥩 Failed to append justification: {:?}", signed_commitment); + } + + self.signed_commitment_sender.notify(signed_commitment); + self.best_beefy_block = Some(round.1); + + metric_set!(self, beefy_best_block, round.1); + } + } + } + + pub(crate) async fn run(mut self) { + let mut votes = Box::pin(self.gossip_engine.lock().messages_for(topic::()).filter_map( + |notification| async move { + debug!(target: "beefy", "🥩 Got vote message: {:?}", notification); + + VoteMessage::, Public, Signature>::decode( + &mut ¬ification.message[..], + ) + .ok() + }, + )); + + loop { + let engine = self.gossip_engine.clone(); + let gossip_engine = future::poll_fn(|cx| engine.lock().poll_unpin(cx)); + + futures::select! 
{ + notification = self.finality_notifications.next().fuse() => { + if let Some(notification) = notification { + self.handle_finality_notification(notification); + } else { + return; + } + }, + vote = votes.next().fuse() => { + if let Some(vote) = vote { + self.handle_vote( + (vote.commitment.payload, vote.commitment.block_number), + (vote.id, vote.signature), + ); + } else { + return; + } + }, + _ = gossip_engine.fuse() => { + error!(target: "beefy", "🥩 Gossip engine has terminated."); + return; + } + } + } + } +} + +/// Extract the MMR root hash from a digest in the given header, if it exists. +fn find_mmr_root_digest(header: &B::Header) -> Option +where + B: Block, + Id: Codec, +{ + header.digest().logs().iter().find_map(|log| { + match log.try_to::>(OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID)) { + Some(ConsensusLog::MmrRoot(root)) => Some(root), + _ => None, + } + }) +} + +/// Scan the `header` digest log for a BEEFY validator set change. Return either the new +/// validator set or `None` in case no validator set change has been signaled. 
+fn find_authorities_change(header: &B::Header) -> Option> +where + B: Block, +{ + let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); + + let filter = |log: ConsensusLog| match log { + ConsensusLog::AuthoritiesChange(validator_set) => Some(validator_set), + _ => None, + }; + + header.digest().convert_first(|l| l.try_to(id).and_then(filter)) +} + +/// Calculate next block number to vote on +fn vote_target(best_grandpa: N, best_beefy: N, min_delta: u32) -> N +where + N: AtLeast32Bit + Copy + Debug, +{ + let diff = best_grandpa.saturating_sub(best_beefy); + let diff = diff.saturated_into::(); + let target = best_beefy + min_delta.max(diff.next_power_of_two()).into(); + + trace!( + target: "beefy", + "🥩 vote target - diff: {:?}, next_power_of_two: {:?}, target block: #{:?}", + diff, + diff.next_power_of_two(), + target, + ); + + target +} + +#[cfg(test)] +mod tests { + use super::vote_target; + + #[test] + fn vote_on_min_block_delta() { + let t = vote_target(1u32, 0, 4); + assert_eq!(4, t); + let t = vote_target(2u32, 0, 4); + assert_eq!(4, t); + let t = vote_target(3u32, 0, 4); + assert_eq!(4, t); + let t = vote_target(4u32, 0, 4); + assert_eq!(4, t); + + let t = vote_target(4u32, 4, 4); + assert_eq!(8, t); + + let t = vote_target(10u32, 10, 4); + assert_eq!(14, t); + let t = vote_target(11u32, 10, 4); + assert_eq!(14, t); + let t = vote_target(12u32, 10, 4); + assert_eq!(14, t); + let t = vote_target(13u32, 10, 4); + assert_eq!(14, t); + + let t = vote_target(10u32, 10, 8); + assert_eq!(18, t); + let t = vote_target(11u32, 10, 8); + assert_eq!(18, t); + let t = vote_target(12u32, 10, 8); + assert_eq!(18, t); + let t = vote_target(13u32, 10, 8); + assert_eq!(18, t); + } + + #[test] + fn vote_on_power_of_two() { + let t = vote_target(1008u32, 1000, 4); + assert_eq!(1008, t); + + let t = vote_target(1016u32, 1000, 4); + assert_eq!(1016, t); + + let t = vote_target(1032u32, 1000, 4); + assert_eq!(1032, t); + + let t = vote_target(1064u32, 1000, 4); + 
assert_eq!(1064, t); + + let t = vote_target(1128u32, 1000, 4); + assert_eq!(1128, t); + + let t = vote_target(1256u32, 1000, 4); + assert_eq!(1256, t); + + let t = vote_target(1512u32, 1000, 4); + assert_eq!(1512, t); + + let t = vote_target(1024u32, 0, 4); + assert_eq!(1024, t); + } + + #[test] + fn vote_on_target_block() { + let t = vote_target(1008u32, 1002, 4); + assert_eq!(1010, t); + let t = vote_target(1010u32, 1002, 4); + assert_eq!(1010, t); + + let t = vote_target(1016u32, 1006, 4); + assert_eq!(1022, t); + let t = vote_target(1022u32, 1006, 4); + assert_eq!(1022, t); + + let t = vote_target(1032u32, 1012, 4); + assert_eq!(1044, t); + let t = vote_target(1044u32, 1012, 4); + assert_eq!(1044, t); + + let t = vote_target(1064u32, 1014, 4); + assert_eq!(1078, t); + let t = vote_target(1078u32, 1014, 4); + assert_eq!(1078, t); + + let t = vote_target(1128u32, 1008, 4); + assert_eq!(1136, t); + let t = vote_target(1136u32, 1008, 4); + assert_eq!(1136, t); + } +} diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index fcdb053c47c16..ff3a99760bd28 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -163,6 +163,7 @@ struct ClientSpec { // Never used, left only for backward compatibility. consensus_engine: (), #[serde(skip_serializing)] + #[allow(unused)] genesis: serde::de::IgnoredAny, /// Mapping from `block_hash` to `wasm_code`. 
/// diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs index b1f7f94f9eb28..7ec91a5ad87e9 100644 --- a/client/consensus/common/src/longest_chain.rs +++ b/client/consensus/common/src/longest_chain.rs @@ -91,11 +91,12 @@ where &self, target_hash: Block::Hash, maybe_max_number: Option>, - ) -> Result, ConsensusError> { + ) -> Result { let import_lock = self.backend.get_import_lock(); self.backend .blockchain() .best_containing(target_hash, maybe_max_number, import_lock) + .map(|maybe_hash| maybe_hash.unwrap_or(target_hash)) .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index a895324a2e7b9..5fef0e5b12d08 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1418,6 +1418,7 @@ mod qc { #[derive(Debug, Clone)] struct Node { hash: H256, + #[allow(unused)] parent: H256, state: KeyMap, changes: KeySet, diff --git a/client/executor/common/src/runtime_blob/globals_snapshot.rs b/client/executor/common/src/runtime_blob/globals_snapshot.rs index 6a29ff8bae365..a25fa6f9fd639 100644 --- a/client/executor/common/src/runtime_blob/globals_snapshot.rs +++ b/client/executor/common/src/runtime_blob/globals_snapshot.rs @@ -34,14 +34,14 @@ pub trait InstanceGlobals { /// Get a handle to a global by it's export name. /// /// The requested export is must exist in the exported list, and it should be a mutable global. - fn get_global(&self, export_name: &str) -> Self::Global; + fn get_global(&mut self, export_name: &str) -> Self::Global; /// Get the current value of the global. - fn get_global_value(&self, global: &Self::Global) -> sp_wasm_interface::Value; + fn get_global_value(&mut self, global: &Self::Global) -> sp_wasm_interface::Value; /// Update the current value of the global. /// /// The global behind the handle is guaranteed to be mutable and the value to be the same type /// as the global. 
- fn set_global_value(&self, global: &Self::Global, value: sp_wasm_interface::Value); + fn set_global_value(&mut self, global: &Self::Global, value: sp_wasm_interface::Value); } /// A set of exposed mutable globals. @@ -79,7 +79,10 @@ impl GlobalsSnapshot { /// /// This function panics if the instance doesn't correspond to the module from which the /// [`ExposedMutableGlobalsSet`] was collected. - pub fn take(mutable_globals: &ExposedMutableGlobalsSet, instance: &Instance) -> Self + pub fn take( + mutable_globals: &ExposedMutableGlobalsSet, + instance: &mut Instance, + ) -> Self where Instance: InstanceGlobals, { @@ -98,7 +101,7 @@ impl GlobalsSnapshot { /// Apply the snapshot to the given instance. /// /// This instance must be the same that was used for creation of this snapshot. - pub fn apply(&self, instance: &Instance) + pub fn apply(&self, instance: &mut Instance) where Instance: InstanceGlobals, { diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs index 3ea29540f98ee..ffbeb8c7ab533 100644 --- a/client/executor/common/src/util.rs +++ b/client/executor/common/src/util.rs @@ -233,7 +233,7 @@ pub mod wasmer { let range = checked_range(dest_addr.into(), source.len(), destination.len()) .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - &mut destination[range].copy_from_slice(source); + destination[range].copy_from_slice(source); Ok(()) } } diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index eb73909d9234f..1e9f1225518a3 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -78,21 +78,21 @@ pub trait WasmInstance: Send { /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call(&self, method: InvokeMethod, data: &[u8]) -> Result, Error>; + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error>; /// Call an exported method on this WASM instance. 
/// /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call_export(&self, method: &str, data: &[u8]) -> Result, Error> { + fn call_export(&mut self, method: &str, data: &[u8]) -> Result, Error> { self.call(method.into(), data) } /// Get the value from a global with the given `name`. /// /// This method is only suitable for getting immutable globals. - fn get_global_const(&self, name: &str) -> Result, Error>; + fn get_global_const(&mut self, name: &str) -> Result, Error>; /// **Testing Only**. This function returns the base address of the linear memory. /// diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index c9f7d6b1e2970..2b5699fa3f77a 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -91,7 +91,7 @@ sp_core::wasm_export_functions! { // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take // 16 writes to process a single wasm page. - let mut heap_ptr = heap_base as usize; + let heap_ptr = heap_base as usize; // Find the next wasm page boundary. let heap_ptr = round_up_to(heap_ptr, 65536); @@ -234,7 +234,7 @@ sp_core::wasm_export_functions! { match instance.get_global_val("test_global") { Some(sp_sandbox::Value::I64(val)) => val, None => 30, - val => 40, + _ => 40, } } @@ -362,7 +362,7 @@ sp_core::wasm_export_functions! { // It is expected that the given pointer is not allocated. 
fn check_and_set_in_heap(heap_base: u32, offset: u32) { let test_message = b"Hello invalid heap memory"; - let ptr = unsafe { (heap_base + offset) as *mut u8 }; + let ptr = (heap_base + offset) as *mut u8; let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; diff --git a/client/executor/src/integration_tests/linux.rs b/client/executor/src/integration_tests/linux.rs index 7e0696973dc77..38e57707e9e6b 100644 --- a/client/executor/src/integration_tests/linux.rs +++ b/client/executor/src/integration_tests/linux.rs @@ -40,7 +40,7 @@ fn memory_consumption_compiled() { let runtime = mk_test_runtime(WasmExecutionMethod::Compiled, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let heap_base = instance .get_global_const("__heap_base") .expect("`__heap_base` is valid") diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index dabead4799dc8..fe964f47ba374 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -467,7 +467,7 @@ test_wasm_execution!(returns_mutable_static); fn returns_mutable_static(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let res = instance.call_export("returns_mutable_static", &[0]).unwrap(); assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); @@ -482,7 +482,7 @@ test_wasm_execution!(returns_mutable_static_bss); fn returns_mutable_static_bss(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let res = instance.call_export("returns_mutable_static_bss", &[0]).unwrap(); assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); @@ -508,7 +508,7 @@ fn 
restoration_of_globals(wasm_method: WasmExecutionMethod) { const REQUIRED_MEMORY_PAGES: u64 = 32; let runtime = mk_test_runtime(wasm_method, REQUIRED_MEMORY_PAGES); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); // On the first invocation we allocate approx. 768KB (75%) of stack and then trap. let res = instance.call_export("allocates_huge_stack_array", &true.encode()); @@ -522,7 +522,7 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { test_wasm_execution!(interpreted_only heap_is_reset_between_calls); fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let heap_base = instance .get_global_const("__heap_base") diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 38dba55b5f87c..d912fc0fd13c9 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -101,8 +101,6 @@ pub struct WasmExecutor { host_functions: Arc>, /// WASM runtime cache. cache: Arc, - /// The size of the instances cache. - max_runtime_instances: usize, /// The path to a directory which the executor can leverage for a file cache, e.g. put there /// compiled artifacts. cache_path: Option, @@ -138,7 +136,6 @@ impl WasmExecutor { default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), host_functions: Arc::new(host_functions), cache: Arc::new(RuntimeCache::new(max_runtime_instances, cache_path.clone())), - max_runtime_instances, cache_path, } } @@ -166,7 +163,7 @@ impl WasmExecutor { where F: FnOnce( AssertUnwindSafe<&Arc>, - AssertUnwindSafe<&dyn WasmInstance>, + AssertUnwindSafe<&mut dyn WasmInstance>, Option<&RuntimeVersion>, AssertUnwindSafe<&mut dyn Externalities>, ) -> Result>, @@ -192,7 +189,7 @@ impl WasmExecutor { /// Perform a call into the given runtime. 
/// - /// The runtime is passed as a [`RuntimeBlob`]. The runtime will be isntantiated with the + /// The runtime is passed as a [`RuntimeBlob`]. The runtime will be instantiated with the /// parameters this `WasmExecutor` was initialized with. /// /// In case of problems with during creation of the runtime or instantation, a `Err` is @@ -220,7 +217,7 @@ impl WasmExecutor { .new_instance() .map_err(|e| format!("Failed to create instance: {:?}", e))?; - let instance = AssertUnwindSafe(instance); + let mut instance = AssertUnwindSafe(instance); let mut ext = AssertUnwindSafe(ext); let module = AssertUnwindSafe(module); @@ -250,7 +247,7 @@ impl sp_core::traits::ReadRuntimeVersion for WasmExecutor { } // If the blob didn't have embedded runtime version section, we fallback to the legacy - // way of fetching the verison: i.e. instantiating the given instance and calling + // way of fetching the version: i.e. instantiating the given instance and calling // `Core_version` on it. self.uncached_call( @@ -286,7 +283,7 @@ impl CodeExecutor for WasmExecutor { runtime_code, ext, false, - |module, instance, _onchain_version, mut ext| { + |module, mut instance, _onchain_version, mut ext| { with_externalities_safe(&mut **ext, move || { preregister_builtin_ext(module.clone()); instance.call_export(method, data).map(NativeOrEncoded::Encoded) @@ -441,7 +438,7 @@ impl RuntimeSpawn for RuntimeInstanceSpawn { // pool of instances should be used. 
// // https://github.com/paritytech/substrate/issues/7354 - let instance = + let mut instance = module.new_instance().expect("Failed to create new instance from module"); instance @@ -528,7 +525,7 @@ impl CodeExecutor for NativeElseWasmExecut runtime_code, ext, false, - |module, instance, onchain_version, mut ext| { + |module, mut instance, onchain_version, mut ext| { let onchain_version = onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index b11e3958dbc81..b3a981d9e0821 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -76,7 +76,7 @@ impl VersionedRuntime { where F: FnOnce( &Arc, - &dyn WasmInstance, + &mut dyn WasmInstance, Option<&RuntimeVersion>, &mut dyn Externalities, ) -> Result, @@ -90,12 +90,12 @@ impl VersionedRuntime { match instance { Some((index, mut locked)) => { - let (instance, new_inst) = locked + let (mut instance, new_inst) = locked .take() .map(|r| Ok((r, false))) .unwrap_or_else(|| self.module.new_instance().map(|i| (i, true)))?; - let result = f(&self.module, &*instance, self.version.as_ref(), ext); + let result = f(&self.module, &mut *instance, self.version.as_ref(), ext); if let Err(e) = &result { if new_inst { log::warn!( @@ -129,9 +129,9 @@ impl VersionedRuntime { log::warn!(target: "wasm-runtime", "Ran out of free WASM instances"); // Allocate a new instance - let instance = self.module.new_instance()?; + let mut instance = self.module.new_instance()?; - f(&self.module, &*instance, self.version.as_ref(), ext) + f(&self.module, &mut *instance, self.version.as_ref(), ext) }, } } @@ -213,7 +213,7 @@ impl RuntimeCache { where F: FnOnce( &Arc, - &dyn WasmInstance, + &mut dyn WasmInstance, Option<&RuntimeVersion>, &mut dyn Externalities, ) -> Result, diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 6052662fa7ccf..ceab07c2f71cb 100644 --- 
a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -736,7 +736,7 @@ pub struct WasmiInstance { unsafe impl Send for WasmiInstance {} impl WasmInstance for WasmiInstance { - fn call(&self, method: InvokeMethod, data: &[u8]) -> Result, Error> { + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { // We reuse a single wasm instance for multiple calls and a previous call (if any) // altered the state. Therefore, we need to restore the instance to original state. @@ -767,7 +767,7 @@ impl WasmInstance for WasmiInstance { ) } - fn get_global_const(&self, name: &str) -> Result, Error> { + fn get_global_const(&mut self, name: &str) -> Result, Error> { match self.instance.export_by_name(name) { Some(global) => Ok(Some( global diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 3158cdecc3263..c122b3ab0e696 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] libc = "0.2.90" cfg-if = "1.0" log = "0.4.8" -scoped-tls = "1.0" parity-wasm = "0.42.0" codec = { package = "parity-scale-codec", version = "2.0.0" } sc-executor-common = { version = "0.10.0-dev", path = "../common" } @@ -24,8 +23,9 @@ sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-in sp-runtime-interface = { version = "4.0.0-dev", path = "../../../primitives/runtime-interface" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sc-allocator = { version = "4.0.0-dev", path = "../../allocator" } -wasmtime = { version = "0.27.0", default-features = false, features = [ +wasmtime = { version = "0.29.0", default-features = false, features = [ "cache", + "jitdump", "parallel-compilation", ] } diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index 8453ec3954354..4edb9f9c423f0 100644 --- a/client/executor/wasmtime/src/host.rs +++ 
b/client/executor/wasmtime/src/host.rs @@ -19,7 +19,7 @@ //! This module defines `HostState` and `HostContext` structs which provide logic and state //! required for execution of host. -use crate::instance_wrapper::InstanceWrapper; +use crate::{instance_wrapper::InstanceWrapper, runtime::StoreData}; use codec::{Decode, Encode}; use log::trace; use sc_allocator::FreeingBumpHeapAllocator; @@ -31,7 +31,7 @@ use sc_executor_common::{ use sp_core::sandbox as sandbox_primitives; use sp_wasm_interface::{FunctionContext, MemoryId, Pointer, Sandbox, WordSize}; use std::{cell::RefCell, rc::Rc}; -use wasmtime::{Func, Val}; +use wasmtime::{Caller, Func, Val}; /// The state required to construct a HostContext context. The context only lasts for one host /// call, whereas the state is maintained for the duration of a Wasm runtime call, which may make @@ -64,45 +64,67 @@ impl HostState { } /// Materialize `HostContext` that can be used to invoke a substrate host `dyn Function`. - pub fn materialize<'a>(&'a self) -> HostContext<'a> { - HostContext(self) + pub(crate) fn materialize<'a, 'b, 'c>( + &'a self, + caller: &'b mut Caller<'c, StoreData>, + ) -> HostContext<'a, 'b, 'c> { + HostContext { host_state: self, caller } } } /// A `HostContext` implements `FunctionContext` for making host calls from a Wasmtime /// runtime. The `HostContext` exists only for the lifetime of the call and borrows state from /// a longer-living `HostState`. 
-pub struct HostContext<'a>(&'a HostState); +pub(crate) struct HostContext<'a, 'b, 'c> { + host_state: &'a HostState, + caller: &'b mut Caller<'c, StoreData>, +} -impl<'a> std::ops::Deref for HostContext<'a> { +impl<'a, 'b, 'c> std::ops::Deref for HostContext<'a, 'b, 'c> { type Target = HostState; fn deref(&self) -> &HostState { - self.0 + self.host_state } } -impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> { +impl<'a, 'b, 'c> sp_wasm_interface::FunctionContext for HostContext<'a, 'b, 'c> { fn read_memory_into( &self, address: Pointer, dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { - self.instance.read_memory_into(address, dest).map_err(|e| e.to_string()) + let ctx = &self.caller; + self.host_state + .instance + .read_memory_into(ctx, address, dest) + .map_err(|e| e.to_string()) } fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { - self.instance.write_memory_from(address, data).map_err(|e| e.to_string()) + let ctx = &mut self.caller; + self.host_state + .instance + .write_memory_from(ctx, address, data) + .map_err(|e| e.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { - self.instance - .allocate(&mut *self.allocator.borrow_mut(), size) + let ctx = &mut self.caller; + let allocator = &self.host_state.allocator; + + self.host_state + .instance + .allocate(ctx, &mut *allocator.borrow_mut(), size) .map_err(|e| e.to_string()) } fn deallocate_memory(&mut self, ptr: Pointer) -> sp_wasm_interface::Result<()> { - self.instance - .deallocate(&mut *self.allocator.borrow_mut(), ptr) + let ctx = &mut self.caller; + let allocator = &self.host_state.allocator; + + self.host_state + .instance + .deallocate(ctx, &mut *allocator.borrow_mut(), ptr) .map_err(|e| e.to_string()) } @@ -111,7 +133,7 @@ impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> { } } -impl<'a> Sandbox for HostContext<'a> { +impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { fn memory_get( 
&mut self, memory_id: MemoryId, @@ -129,7 +151,8 @@ impl<'a> Sandbox for HostContext<'a> { Ok(buffer) => buffer, }; - if let Err(_) = self.instance.write_memory_from(buf_ptr, &buffer) { + let instance = self.instance.clone(); + if let Err(_) = instance.write_memory_from(&mut self.caller, buf_ptr, &buffer) { return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) } @@ -148,7 +171,7 @@ impl<'a> Sandbox for HostContext<'a> { let len = val_len as usize; - let buffer = match self.instance.read_memory(val_ptr, len) { + let buffer = match self.instance.read_memory(&self.caller, val_ptr, len) { Err(_) => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), Ok(buffer) => buffer, }; @@ -241,12 +264,14 @@ impl<'a> Sandbox for HostContext<'a> { ) -> sp_wasm_interface::Result { // Extract a dispatch thunk from the instance's table by the specified index. let dispatch_thunk = { + let ctx = &mut self.caller; let table_item = self + .host_state .instance .table() .as_ref() .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")? - .get(dispatch_thunk_id); + .get(ctx, dispatch_thunk_id); table_item .ok_or_else(|| "dispatch_thunk_id is out of bounds")? 
@@ -295,12 +320,12 @@ impl<'a> Sandbox for HostContext<'a> { } } -struct SandboxContext<'a, 'b> { - host_context: &'a mut HostContext<'b>, +struct SandboxContext<'a, 'b, 'c, 'd> { + host_context: &'a mut HostContext<'b, 'c, 'd>, dispatch_thunk: Func, } -impl<'a, 'b> sandbox::SandboxContext for SandboxContext<'a, 'b> { +impl<'a, 'b, 'c, 'd> sandbox::SandboxContext for SandboxContext<'a, 'b, 'c, 'd> { fn invoke( &mut self, invoke_args_ptr: Pointer, @@ -308,12 +333,16 @@ impl<'a, 'b> sandbox::SandboxContext for SandboxContext<'a, 'b> { state: u32, func_idx: SupervisorFuncIndex, ) -> Result { - let result = self.dispatch_thunk.call(&[ - Val::I32(u32::from(invoke_args_ptr) as i32), - Val::I32(invoke_args_len as i32), - Val::I32(state as i32), - Val::I32(usize::from(func_idx) as i32), - ]); + let result = self.dispatch_thunk.call( + &mut self.host_context.caller, + &[ + Val::I32(u32::from(invoke_args_ptr) as i32), + Val::I32(invoke_args_len as i32), + Val::I32(state as i32), + Val::I32(usize::from(func_idx) as i32), + ], + ); + match result { Ok(ret_vals) => { let ret_val = if ret_vals.len() != 1 { diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index b27fb944bc030..a9ef6e1f58a70 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -16,12 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{state_holder, util}; +use crate::{ + runtime::{Store, StoreData}, + util, +}; use sc_executor_common::error::WasmError; use sp_wasm_interface::{Function, ValueType}; use std::any::Any; use wasmtime::{ - Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, Store, + Caller, Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, Trap, Val, }; @@ -34,8 +37,8 @@ pub struct Imports { /// Goes over all imports of a module and prepares a vector of `Extern`s that can be used for /// instantiation of the module. Returns an error if there are imports that cannot be satisfied. -pub fn resolve_imports( - store: &Store, +pub(crate) fn resolve_imports( + store: &mut Store, module: &Module, host_functions: &[&'static dyn Function], heap_pages: u32, @@ -78,7 +81,7 @@ fn import_name<'a, 'b: 'a>(import: &'a ImportType<'b>) -> Result<&'a str, WasmEr } fn resolve_memory_import( - store: &Store, + store: &mut Store, import_ty: &ImportType, heap_pages: u32, ) -> Result { @@ -117,7 +120,7 @@ fn resolve_memory_import( } fn resolve_func_import( - store: &Store, + store: &mut Store, import_ty: &ImportType, host_functions: &[&'static dyn Function], allow_missing_func_imports: bool, @@ -162,19 +165,27 @@ struct HostFuncHandler { host_func: &'static dyn Function, } -fn call_static( +fn call_static<'a>( static_func: &'static dyn Function, wasmtime_params: &[Val], wasmtime_results: &mut [Val], + mut caller: Caller<'a, StoreData>, ) -> Result<(), wasmtime::Trap> { - let unwind_result = state_holder::with_context(|host_ctx| { - let mut host_ctx = host_ctx.expect( - "host functions can be called only from wasm instance; - wasm instance is always called initializing context; - therefore host_ctx cannot be None; - qed - ", - ); + let unwind_result = { + let host_state = caller + .data() + .host_state() + .expect( + "host functions can be called only from wasm instance; + wasm instance is always called initializing context; + 
therefore host_ctx cannot be None; + qed + ", + ) + .clone(); + + let mut host_ctx = host_state.materialize(&mut caller); + // `from_wasmtime_val` panics if it encounters a value that doesn't fit into the values // available in substrate. // @@ -185,7 +196,7 @@ fn call_static( std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { static_func.execute(&mut host_ctx, &mut params) })) - }); + }; let execution_result = match unwind_result { Ok(execution_result) => execution_result, @@ -219,11 +230,11 @@ impl HostFuncHandler { Self { host_func } } - fn into_extern(self, store: &Store) -> Extern { + fn into_extern(self, store: &mut Store) -> Extern { let host_func = self.host_func; let func_ty = wasmtime_func_sig(self.host_func); - let func = Func::new(store, func_ty, move |_, params, result| { - call_static(host_func, params, result) + let func = Func::new(store, func_ty, move |caller, params, result| { + call_static(host_func, params, result, caller) }); Extern::Func(func) } @@ -243,7 +254,7 @@ impl MissingHostFuncHandler { }) } - fn into_extern(self, store: &Store, func_ty: &FuncType) -> Extern { + fn into_extern(self, store: &mut Store, func_ty: &FuncType) -> Extern { let Self { module, name } = self; let func = Func::new(store, func_ty.clone(), move |_, _, _| { Err(Trap::new(format!("call to a missing function {}:{}", module, name))) diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index f66d62f673d90..ccfbb912b9a69 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -19,20 +19,18 @@ //! Defines data and logic needed for interaction with an WebAssembly instance of a substrate //! runtime module. 
-use crate::{ - imports::Imports, - util::{from_wasmtime_val, into_wasmtime_val}, -}; +use crate::imports::Imports; use sc_executor_common::{ error::{Error, Result}, - runtime_blob, util::checked_range, wasm_runtime::InvokeMethod, }; use sp_wasm_interface::{Pointer, Value, WordSize}; -use std::{marker, slice}; -use wasmtime::{Extern, Func, Global, Instance, Memory, Module, Store, Table, Val}; +use std::marker; +use wasmtime::{ + AsContext, AsContextMut, Extern, Func, Global, Instance, Memory, Module, Table, Val, +}; /// Invoked entrypoint format. pub enum EntryPointType { @@ -58,7 +56,12 @@ pub struct EntryPoint { impl EntryPoint { /// Call this entry point. - pub fn call(&self, data_ptr: Pointer, data_len: WordSize) -> Result { + pub fn call( + &self, + ctx: impl AsContextMut, + data_ptr: Pointer, + data_len: WordSize, + ) -> Result { let data_ptr = u32::from(data_ptr); let data_len = u32::from(data_len); @@ -68,15 +71,18 @@ impl EntryPoint { match self.call_type { EntryPointType::Direct { ref entrypoint } => - entrypoint.call((data_ptr, data_len)).map_err(handle_trap), + entrypoint.call(ctx, (data_ptr, data_len)).map_err(handle_trap), EntryPointType::Wrapped { func, ref dispatcher } => - dispatcher.call((func, data_ptr, data_len)).map_err(handle_trap), + dispatcher.call(ctx, (func, data_ptr, data_len)).map_err(handle_trap), } } - pub fn direct(func: wasmtime::Func) -> std::result::Result { + pub fn direct( + func: wasmtime::Func, + ctx: impl AsContext, + ) -> std::result::Result { let entrypoint = func - .typed::<(u32, u32), u64>() + .typed::<(u32, u32), u64, _>(ctx) .map_err(|_| "Invalid signature for direct entry point")? 
.clone(); Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) @@ -85,9 +91,10 @@ impl EntryPoint { pub fn wrapped( dispatcher: wasmtime::Func, func: u32, + ctx: impl AsContext, ) -> std::result::Result { let dispatcher = dispatcher - .typed::<(u32, u32, u32), u64>() + .typed::<(u32, u32, u32), u64, _>(ctx) .map_err(|_| "Invalid signature for wrapped entry point")? .clone(); Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) @@ -144,8 +151,13 @@ fn extern_func(extern_: &Extern) -> Option<&Func> { impl InstanceWrapper { /// Create a new instance wrapper from the given wasm module. - pub fn new(store: &Store, module: &Module, imports: &Imports, heap_pages: u32) -> Result { - let instance = Instance::new(store, module, &imports.externs) + pub fn new( + module: &Module, + imports: &Imports, + heap_pages: u32, + mut ctx: impl AsContextMut, + ) -> Result { + let instance = Instance::new(&mut ctx, module, &imports.externs) .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; let memory = match imports.memory_import_index { @@ -153,51 +165,55 @@ impl InstanceWrapper { .expect("only memory can be at the `memory_idx`; qed") .clone(), None => { - let memory = get_linear_memory(&instance)?; - if !memory.grow(heap_pages).is_ok() { - return Err("failed to increase the linear memory size".into()) + let memory = get_linear_memory(&instance, &mut ctx)?; + if !memory.grow(&mut ctx, heap_pages).is_ok() { + return Err("failed to increase the linear memory size".into()) } memory }, }; - Ok(Self { - table: get_table(&instance), - instance, - memory, - _not_send_nor_sync: marker::PhantomData, - }) + let table = get_table(&instance, ctx); + + Ok(Self { table, instance, memory, _not_send_nor_sync: marker::PhantomData }) } /// Resolves a substrate entrypoint by the given name. /// /// An entrypoint must have a signature `(i32, i32) -> i64`, otherwise this function will return /// an error.
- pub fn resolve_entrypoint(&self, method: InvokeMethod) -> Result { + pub fn resolve_entrypoint( + &self, + method: InvokeMethod, + mut ctx: impl AsContextMut, + ) -> Result { Ok(match method { InvokeMethod::Export(method) => { // Resolve the requested method and verify that it has a proper signature. - let export = self.instance.get_export(method).ok_or_else(|| { + let export = self.instance.get_export(&mut ctx, method).ok_or_else(|| { Error::from(format!("Exported method {} is not found", method)) })?; let func = extern_func(&export) .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? .clone(); - EntryPoint::direct(func).map_err(|_| { + EntryPoint::direct(func, ctx).map_err(|_| { Error::from(format!("Exported function '{}' has invalid signature.", method)) })? }, InvokeMethod::Table(func_ref) => { - let table = - self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; - let val = table.get(func_ref).ok_or(Error::NoTableEntryWithIndex(func_ref))?; + let table = self + .instance + .get_table(&mut ctx, "__indirect_function_table") + .ok_or(Error::NoTable)?; + let val = + table.get(&mut ctx, func_ref).ok_or(Error::NoTableEntryWithIndex(func_ref))?; let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? .ok_or(Error::FunctionRefIsNull(func_ref))? .clone(); - EntryPoint::direct(func).map_err(|_| { + EntryPoint::direct(func, ctx).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for direct call.", func_ref, @@ -205,10 +221,12 @@ impl InstanceWrapper { })? 
}, InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let table = - self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; + let table = self + .instance + .get_table(&mut ctx, "__indirect_function_table") + .ok_or(Error::NoTable)?; let val = table - .get(dispatcher_ref) + .get(&mut ctx, dispatcher_ref) .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; let dispatcher = val .funcref() @@ -216,7 +234,7 @@ impl InstanceWrapper { .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? .clone(); - EntryPoint::wrapped(dispatcher, func).map_err(|_| { + EntryPoint::wrapped(dispatcher, func, ctx).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for wrapped call.", dispatcher_ref, @@ -234,17 +252,17 @@ impl InstanceWrapper { /// Reads `__heap_base: i32` global variable and returns it. /// /// If it doesn't exist, not a global or of not i32 type returns an error. - pub fn extract_heap_base(&self) -> Result { + pub fn extract_heap_base(&self, mut ctx: impl AsContextMut) -> Result { let heap_base_export = self .instance - .get_export("__heap_base") + .get_export(&mut ctx, "__heap_base") .ok_or_else(|| Error::from("__heap_base is not found"))?; let heap_base_global = extern_global(&heap_base_export) .ok_or_else(|| Error::from("__heap_base is not a global"))?; let heap_base = heap_base_global - .get() + .get(&mut ctx) .i32() .ok_or_else(|| Error::from("__heap_base is not a i32"))?; @@ -252,15 +270,15 @@ impl InstanceWrapper { } /// Get the value from a global with the given `name`. 
- pub fn get_global_val(&self, name: &str) -> Result> { - let global = match self.instance.get_export(name) { + pub fn get_global_val(&self, mut ctx: impl AsContextMut, name: &str) -> Result> { + let global = match self.instance.get_export(&mut ctx, name) { Some(global) => global, None => return Ok(None), }; let global = extern_global(&global).ok_or_else(|| format!("`{}` is not a global", name))?; - match global.get() { + match global.get(ctx) { Val::I32(val) => Ok(Some(Value::I32(val))), Val::I64(val) => Ok(Some(Value::I64(val))), Val::F32(val) => Ok(Some(Value::F32(val))), @@ -268,12 +286,17 @@ impl InstanceWrapper { _ => Err("Unknown value type".into()), } } + + /// Get a global with the given `name`. + pub fn get_global(&self, ctx: impl AsContextMut, name: &str) -> Option { + self.instance.get_global(ctx, name) + } } /// Extract linear memory instance from the given instance. -fn get_linear_memory(instance: &Instance) -> Result { +fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result { let memory_export = instance - .get_export("memory") + .get_export(ctx, "memory") .ok_or_else(|| Error::from("memory is not exported under `memory` name"))?; let memory = extern_memory(&memory_export) @@ -284,9 +307,9 @@ fn get_linear_memory(instance: &Instance) -> Result { } /// Extract the table from the given instance if any. -fn get_table(instance: &Instance) -> Option { +fn get_table(instance: &Instance, ctx: impl AsContextMut) -> Option
{ instance - .get_export("__indirect_function_table") + .get_export(ctx, "__indirect_function_table") .as_ref() .and_then(extern_table) .cloned() @@ -297,12 +320,17 @@ impl InstanceWrapper { /// Read data from a slice of memory into a newly allocated buffer. /// /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory(&self, source_addr: Pointer, size: usize) -> Result> { - let range = checked_range(source_addr.into(), size, self.memory.data_size()) + pub fn read_memory( + &self, + ctx: impl AsContext, + source_addr: Pointer, + size: usize, + ) -> Result> { + let range = checked_range(source_addr.into(), size, self.memory.data_size(&ctx)) .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; let mut buffer = vec![0; range.len()]; - self.read_memory_into(source_addr, &mut buffer)?; + self.read_memory_into(ctx, source_addr, &mut buffer)?; Ok(buffer) } @@ -310,33 +338,35 @@ impl InstanceWrapper { /// Read data from the instance memory into a slice. /// /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory_into(&self, source_addr: Pointer, dest: &mut [u8]) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice(); - - let range = checked_range(source_addr.into(), dest.len(), memory.len()) - .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; - dest.copy_from_slice(&memory[range]); - Ok(()) - } + pub fn read_memory_into( + &self, + ctx: impl AsContext, + address: Pointer, + dest: &mut [u8], + ) -> Result<()> { + let memory = self.memory.data(ctx.as_context()); + + let range = checked_range(address.into(), dest.len(), memory.len()) + .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; + dest.copy_from_slice(&memory[range]); + Ok(()) } /// Write data to the instance memory from a slice. 
/// /// Returns an error if the write would go out of the memory bounds. - pub fn write_memory_from(&self, dest_addr: Pointer, data: &[u8]) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); - - let range = checked_range(dest_addr.into(), data.len(), memory.len()) - .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - memory[range].copy_from_slice(data); - Ok(()) - } + pub fn write_memory_from( + &self, + mut ctx: impl AsContextMut, + address: Pointer, + data: &[u8], + ) -> Result<()> { + let memory = self.memory.data_mut(ctx.as_context_mut()); + + let range = checked_range(address.into(), data.len(), memory.len()) + .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; + memory[range].copy_from_slice(data); + Ok(()) } /// Allocate some memory of the given size. Returns pointer to the allocated memory region. @@ -345,16 +375,13 @@ impl InstanceWrapper { /// to get more details. pub fn allocate( &self, + mut ctx: impl AsContextMut, allocator: &mut sc_allocator::FreeingBumpHeapAllocator, size: WordSize, ) -> Result> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); + let memory = self.memory.data_mut(ctx.as_context_mut()); - allocator.allocate(memory, size).map_err(Into::into) - } + allocator.allocate(memory, size).map_err(Into::into) } /// Deallocate the memory pointed by the given pointer. @@ -362,64 +389,25 @@ impl InstanceWrapper { /// Returns `Err` in case the given memory region cannot be deallocated. 
pub fn deallocate( &self, + mut ctx: impl AsContextMut, allocator: &mut sc_allocator::FreeingBumpHeapAllocator, ptr: Pointer, ) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); + let memory = self.memory.data_mut(ctx.as_context_mut()); - allocator.deallocate(memory, ptr).map_err(Into::into) - } - } - - /// Returns linear memory of the wasm instance as a slice. - /// - /// # Safety - /// - /// Wasmtime doesn't provide comprehensive documentation about the exact behavior of the data - /// pointer. If a dynamic style heap is used the base pointer of the heap can change. Since - /// growing, we cannot guarantee the lifetime of the returned slice reference. - unsafe fn memory_as_slice(&self) -> &[u8] { - let ptr = self.memory.data_ptr() as *const _; - let len = self.memory.data_size(); - - if len == 0 { - &[] - } else { - slice::from_raw_parts(ptr, len) - } - } - - /// Returns linear memory of the wasm instance as a slice. - /// - /// # Safety - /// - /// See `[memory_as_slice]`. In addition to those requirements, since a mutable reference is - /// returned it must be ensured that only one mutable and no shared references to memory exists - /// at the same time. - unsafe fn memory_as_slice_mut(&self) -> &mut [u8] { - let ptr = self.memory.data_ptr(); - let len = self.memory.data_size(); - - if len == 0 { - &mut [] - } else { - slice::from_raw_parts_mut(ptr, len) - } + allocator.deallocate(memory, ptr).map_err(Into::into) } /// Returns the pointer to the first byte of the linear memory for this instance. - pub fn base_ptr(&self) -> *const u8 { - self.memory.data_ptr() + pub fn base_ptr(&self, ctx: impl AsContext) -> *const u8 { + self.memory.data_ptr(ctx) } /// Removes physical backing from the allocated linear memory. This leads to returning the /// memory back to the system. 
While the memory is zeroed this is considered as a side-effect /// and is not relied upon. Thus this function acts as a hint. - pub fn decommit(&self) { - if self.memory.data_size() == 0 { + pub fn decommit(&self, ctx: impl AsContext) { + if self.memory.data_size(&ctx) == 0 { return } @@ -428,8 +416,8 @@ impl InstanceWrapper { use std::sync::Once; unsafe { - let ptr = self.memory.data_ptr(); - let len = self.memory.data_size(); + let ptr = self.memory.data_ptr(&ctx); + let len = self.memory.data_size(ctx); // Linux handles MADV_DONTNEED reliably. The result is that the given area // is unmapped and will be zeroed on the next pagefault. @@ -447,23 +435,3 @@ impl InstanceWrapper { } } } - -impl runtime_blob::InstanceGlobals for InstanceWrapper { - type Global = wasmtime::Global; - - fn get_global(&self, export_name: &str) -> Self::Global { - self.instance - .get_global(export_name) - .expect("get_global is guaranteed to be called with an export name of a global; qed") - } - - fn get_global_value(&self, global: &Self::Global) -> Value { - from_wasmtime_val(global.get()) - } - - fn set_global_value(&self, global: &Self::Global, value: Value) { - global.set(into_wasmtime_val(value)).expect( - "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", - ); - } -} diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index 62b0b205f6de6..e0d6a262afda9 100644 --- a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -16,12 +16,21 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -/// ! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. +//! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. +//! +//! You can choose a profiling strategy at runtime with +//! environment variable `WASMTIME_PROFILING_STRATEGY`: +//! +//! | `WASMTIME_PROFILING_STRATEGY` | Effect | +//! 
|-------------|-------------------------| +//! | undefined | No profiling | +//! | `"jitdump"` | jitdump profiling | +//! | other value | No profiling (warning) | + mod host; mod imports; mod instance_wrapper; mod runtime; -mod state_holder; mod util; #[cfg(test)] diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index f6878ec5ee6e1..a62356357b1f4 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -22,13 +22,15 @@ use crate::{ host::HostState, imports::{resolve_imports, Imports}, instance_wrapper::{EntryPoint, InstanceWrapper}, - state_holder, + util, }; use sc_allocator::FreeingBumpHeapAllocator; use sc_executor_common::{ error::{Result, WasmError}, - runtime_blob::{DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob}, + runtime_blob::{ + self, DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob, + }, wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, }; use sp_runtime_interface::unpack_ptr_and_len; @@ -36,9 +38,29 @@ use sp_wasm_interface::{Function, Pointer, Value, WordSize}; use std::{ path::{Path, PathBuf}, rc::Rc, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, }; -use wasmtime::{Engine, Store}; +use wasmtime::{AsContext, AsContextMut, Engine, StoreLimits}; + +pub(crate) struct StoreData { + /// The limits we apply to the store. We need to store it here to return a reference to this + /// object when we have the limits enabled. + limits: StoreLimits, + /// This will only be set when we call into the runtime. + host_state: Option>, +} + +impl StoreData { + /// Returns a reference to the host state.
+ pub fn host_state(&self) -> Option<&Rc> { + self.host_state.as_ref() + } +} + +pub(crate) type Store = wasmtime::Store; enum Strategy { FastInstanceReuse { @@ -46,6 +68,7 @@ enum Strategy { globals_snapshot: GlobalsSnapshot, data_segments_snapshot: Arc, heap_base: u32, + store: Store, }, RecreateInstance(InstanceCreator), } @@ -58,8 +81,33 @@ struct InstanceCreator { } impl InstanceCreator { - fn instantiate(&self) -> Result { - InstanceWrapper::new(&self.store, &*self.module, &*self.imports, self.heap_pages) + fn instantiate(&mut self) -> Result { + InstanceWrapper::new(&*self.module, &*self.imports, self.heap_pages, &mut self.store) + } +} + +struct InstanceGlobals<'a, C> { + ctx: &'a mut C, + instance: &'a InstanceWrapper, +} + +impl<'a, C: AsContextMut> runtime_blob::InstanceGlobals for InstanceGlobals<'a, C> { + type Global = wasmtime::Global; + + fn get_global(&mut self, export_name: &str) -> Self::Global { + self.instance + .get_global(&mut self.ctx, export_name) + .expect("get_global is guaranteed to be called with an export name of a global; qed") + } + + fn get_global_value(&mut self, global: &Self::Global) -> Value { + util::from_wasmtime_val(global.get(&mut self.ctx)) + } + + fn set_global_value(&mut self, global: &Self::Global, value: Value) { + global.set(&mut self.ctx, util::into_wasmtime_val(value)).expect( + "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", + ); } } @@ -82,19 +130,25 @@ pub struct WasmtimeRuntime { impl WasmtimeRuntime { /// Creates the store respecting the set limits. 
fn new_store(&self) -> Store { - match self.config.max_memory_pages { - Some(max_memory_pages) => Store::new_with_limits( - &self.engine, - wasmtime::StoreLimitsBuilder::new().memory_pages(max_memory_pages).build(), - ), - None => Store::new(&self.engine), + let limits = if let Some(max_memory_pages) = self.config.max_memory_pages { + wasmtime::StoreLimitsBuilder::new().memory_pages(max_memory_pages).build() + } else { + Default::default() + }; + + let mut store = Store::new(&self.engine, StoreData { limits, host_state: None }); + + if self.config.max_memory_pages.is_some() { + store.limiter(|s| &mut s.limits); } + + store } } impl WasmModule for WasmtimeRuntime { fn new_instance(&self) -> Result> { - let store = self.new_store(); + let mut store = self.new_store(); // Scan all imports, find the matching host functions, and create stubs that adapt arguments // and results. @@ -103,7 +157,7 @@ impl WasmModule for WasmtimeRuntime { // However, I am not sure if that's a good idea since it would be pushing our luck // further by assuming that `Store` not only `Send` but also `Sync`. let imports = resolve_imports( - &store, + &mut store, &self.module, &self.host_functions, self.config.heap_pages, @@ -112,21 +166,24 @@ impl WasmModule for WasmtimeRuntime { let strategy = if let Some(ref snapshot_data) = self.snapshot_data { let instance_wrapper = - InstanceWrapper::new(&store, &self.module, &imports, self.config.heap_pages)?; - let heap_base = instance_wrapper.extract_heap_base()?; + InstanceWrapper::new(&self.module, &imports, self.config.heap_pages, &mut store)?; + let heap_base = instance_wrapper.extract_heap_base(&mut store)?; // This function panics if the instance was created from a runtime blob different from // which the mutable globals were collected. Here, it is easy to see that there is only // a single runtime blob and thus it's the same that was used for both creating the // instance and collecting the mutable globals. 
- let globals_snapshot = - GlobalsSnapshot::take(&snapshot_data.mutable_globals, &instance_wrapper); + let globals_snapshot = GlobalsSnapshot::take( + &snapshot_data.mutable_globals, + &mut InstanceGlobals { ctx: &mut store, instance: &instance_wrapper }, + ); Strategy::FastInstanceReuse { instance_wrapper: Rc::new(instance_wrapper), globals_snapshot, data_segments_snapshot: snapshot_data.data_segments_snapshot.clone(), heap_base, + store, } } else { Strategy::RecreateInstance(InstanceCreator { @@ -152,48 +209,63 @@ pub struct WasmtimeInstance { unsafe impl Send for WasmtimeInstance {} impl WasmInstance for WasmtimeInstance { - fn call(&self, method: InvokeMethod, data: &[u8]) -> Result> { - match &self.strategy { + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result> { + match &mut self.strategy { Strategy::FastInstanceReuse { instance_wrapper, globals_snapshot, data_segments_snapshot, heap_base, + ref mut store, } => { - let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + let entrypoint = instance_wrapper.resolve_entrypoint(method, &mut *store)?; data_segments_snapshot.apply(|offset, contents| { - instance_wrapper.write_memory_from(Pointer::new(offset), contents) + instance_wrapper.write_memory_from(&mut *store, Pointer::new(offset), contents) })?; - globals_snapshot.apply(&**instance_wrapper); + globals_snapshot + .apply(&mut InstanceGlobals { ctx: &mut *store, instance: &*instance_wrapper }); let allocator = FreeingBumpHeapAllocator::new(*heap_base); - let result = - perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator); + let result = perform_call( + &mut *store, + data, + instance_wrapper.clone(), + entrypoint, + allocator, + ); // Signal to the OS that we are done with the linear memory and that it can be // reclaimed. 
- instance_wrapper.decommit(); + instance_wrapper.decommit(&store); result }, - Strategy::RecreateInstance(instance_creator) => { + Strategy::RecreateInstance(ref mut instance_creator) => { let instance_wrapper = instance_creator.instantiate()?; - let heap_base = instance_wrapper.extract_heap_base()?; - let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + let heap_base = instance_wrapper.extract_heap_base(&mut instance_creator.store)?; + let entrypoint = + instance_wrapper.resolve_entrypoint(method, &mut instance_creator.store)?; let allocator = FreeingBumpHeapAllocator::new(heap_base); - perform_call(data, Rc::new(instance_wrapper), entrypoint, allocator) + perform_call( + &mut instance_creator.store, + data, + Rc::new(instance_wrapper), + entrypoint, + allocator, + ) }, } } - fn get_global_const(&self, name: &str) -> Result> { - match &self.strategy { - Strategy::FastInstanceReuse { instance_wrapper, .. } => - instance_wrapper.get_global_val(name), - Strategy::RecreateInstance(instance_creator) => - instance_creator.instantiate()?.get_global_val(name), + fn get_global_const(&mut self, name: &str) -> Result> { + match &mut self.strategy { + Strategy::FastInstanceReuse { instance_wrapper, ref mut store, .. } => + instance_wrapper.get_global_val(&mut *store, name), + Strategy::RecreateInstance(ref mut instance_creator) => instance_creator + .instantiate()? + .get_global_val(&mut instance_creator.store, name), } } @@ -204,8 +276,8 @@ impl WasmInstance for WasmtimeInstance { // associated with it. None }, - Strategy::FastInstanceReuse { instance_wrapper, .. } => - Some(instance_wrapper.base_ptr()), + Strategy::FastInstanceReuse { instance_wrapper, store, .. 
} => + Some(instance_wrapper.base_ptr(&store)), } } } @@ -253,6 +325,23 @@ fn common_config(semantics: &Semantics) -> std::result::Result wasmtime::ProfilingStrategy::JitDump, + None => wasmtime::ProfilingStrategy::None, + Some(_) => { + // Remember if we have already logged a warning due to an unknown profiling strategy. + static UNKNOWN_PROFILING_STRATEGY: AtomicBool = AtomicBool::new(false); + // Make sure that the warning will not be relogged regularly. + if !UNKNOWN_PROFILING_STRATEGY.swap(true, Ordering::Relaxed) { + log::warn!("WASMTIME_PROFILING_STRATEGY is set to unknown value, ignored."); + } + wasmtime::ProfilingStrategy::None + }, + }; + config + .profiler(profiler) + .map_err(|e| WasmError::Instantiation(format!("fail to set profiler: {}", e)))?; + if let Some(DeterministicStackLimit { native_stack_max, .. }) = semantics.deterministic_stack_limit { @@ -536,40 +625,50 @@ pub fn prepare_runtime_artifact( } fn perform_call( + mut ctx: impl AsContextMut, data: &[u8], instance_wrapper: Rc, entrypoint: EntryPoint, mut allocator: FreeingBumpHeapAllocator, ) -> Result> { - let (data_ptr, data_len) = inject_input_data(&instance_wrapper, &mut allocator, data)?; + let (data_ptr, data_len) = + inject_input_data(&mut ctx, &instance_wrapper, &mut allocator, data)?; let host_state = HostState::new(allocator, instance_wrapper.clone()); - let ret = state_holder::with_initialized_state(&host_state, || -> Result<_> { - Ok(unpack_ptr_and_len(entrypoint.call(data_ptr, data_len)?)) - }); + + // Set the host state before calling into wasm. 
+ ctx.as_context_mut().data_mut().host_state = Some(Rc::new(host_state)); + + let ret = entrypoint.call(&mut ctx, data_ptr, data_len).map(unpack_ptr_and_len); + + // Reset the host state + ctx.as_context_mut().data_mut().host_state = None; + let (output_ptr, output_len) = ret?; - let output = extract_output_data(&instance_wrapper, output_ptr, output_len)?; + let output = extract_output_data(ctx, &instance_wrapper, output_ptr, output_len)?; Ok(output) } fn inject_input_data( + mut ctx: impl AsContextMut, instance: &InstanceWrapper, allocator: &mut FreeingBumpHeapAllocator, data: &[u8], ) -> Result<(Pointer, WordSize)> { let data_len = data.len() as WordSize; - let data_ptr = instance.allocate(allocator, data_len)?; - instance.write_memory_from(data_ptr, data)?; + let data_ptr = instance.allocate(&mut ctx, allocator, data_len)?; + instance.write_memory_from(ctx, data_ptr, data)?; Ok((data_ptr, data_len)) } fn extract_output_data( + ctx: impl AsContext, instance: &InstanceWrapper, output_ptr: u32, output_len: u32, ) -> Result> { let mut output = vec![0; output_len as usize]; - instance.read_memory_into(Pointer::new(output_ptr), &mut output)?; + instance.read_memory_into(ctx, Pointer::new(output_ptr), &mut output)?; Ok(output) } diff --git a/client/executor/wasmtime/src/state_holder.rs b/client/executor/wasmtime/src/state_holder.rs deleted file mode 100644 index 0e2684cd25130..0000000000000 --- a/client/executor/wasmtime/src/state_holder.rs +++ /dev/null @@ -1,45 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use crate::host::{HostContext, HostState}; - -scoped_tls::scoped_thread_local!(static HOST_STATE: HostState); - -/// Provide `HostState` for the runtime method call and execute the given function `f`. -/// -/// During the execution of the provided function `with_context` will be callable. -pub fn with_initialized_state(s: &HostState, f: F) -> R -where - F: FnOnce() -> R, -{ - HOST_STATE.set(s, f) -} - -/// Create a `HostContext` from the contained `HostState` and execute the given function `f`. -/// -/// This function is only callable within closure passed to `init_state`. Otherwise, the passed -/// context will be `None`. -pub fn with_context(f: F) -> R -where - F: FnOnce(Option) -> R, -{ - if !HOST_STATE.is_set() { - return f(None) - } - HOST_STATE.with(|state| f(Some(state.materialize()))) -} diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index 366352d7f5c39..2a8bcc0b01b04 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -116,7 +116,7 @@ fn test_nan_canonicalization() { builder.build() }; - let instance = runtime.new_instance().expect("failed to instantiate a runtime"); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); /// A NaN with canonical payload bits. 
const CANONICAL_NAN_BITS: u32 = 0x7fc00000; @@ -159,7 +159,7 @@ fn test_stack_depth_reaching() { builder.deterministic_stack(true); builder.build() }; - let instance = runtime.new_instance().expect("failed to instantiate a runtime"); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); let err = instance.call_export("test-many-locals", &[]).unwrap_err(); @@ -180,7 +180,7 @@ fn test_max_memory_pages() { builder.max_memory_pages(max_memory_pages); builder.build() }; - let instance = runtime.new_instance()?; + let mut instance = runtime.new_instance()?; let _ = instance.call_export("main", &[])?; Ok(()) } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index f27a530ed2f40..c79698902e975 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1165,7 +1165,7 @@ where debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); let result = match select_chain.finality_target(block, None).await { - Ok(Some(best_hash)) => { + Ok(best_hash) => { let best_header = client .header(BlockId::Hash(best_hash))? 
.expect("Header known to exist after `finality_target` call; qed"); @@ -1223,10 +1223,6 @@ where }) .or_else(|| Some((target_header.hash(), *target_header.number()))) }, - Ok(None) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); - None - }, Err(e) => { debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); None diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index f663bfe94afdf..1c4d1b4e97b88 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -118,10 +118,10 @@ where ) .await } else { - Ok(Some(pending_change.canon_hash)) + Ok(pending_change.canon_hash) }; - if let Ok(Some(hash)) = effective_block_hash { + if let Ok(hash) = effective_block_hash { if let Ok(Some(header)) = self.inner.header(BlockId::Hash(hash)) { if *header.number() == pending_change.effective_number() { out.push((header.hash(), *header.number())); diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index c078e5b892fe2..b5fdcfd434300 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -26,6 +26,6 @@ sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } tracing = "0.1.25" [dev-dependencies] -async-std = "1.6.5" +async-std = "1.10.0" quickcheck = "1.0.3" substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 873c2a847a29a..68b9595ae1908 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -18,7 +18,7 @@ prost-build = "0.8" [dependencies] async-trait = "0.1" -async-std = "1.6.5" +async-std = "1.10.0" bitflags = "1.3.2" cid = "0.6.0" bytes = "1" @@ -48,7 +48,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-peerset = { version = "4.0.0-dev", path = "../peerset" } serde = 
{ version = "1.0.126", features = ["derive"] } serde_json = "1.0.68" -smallvec = "1.5.0" +smallvec = "1.7.0" sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 08d061ee26b23..7b334175a2805 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -38,6 +38,7 @@ use libp2p::{ use log::debug; use prost::Message; use sc_consensus::import_queue::{IncomingBlock, Origin}; +use sc_peerset::PeersetHandle; use sp_consensus::BlockOrigin; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, @@ -206,6 +207,7 @@ impl Behaviour { light_client_request_protocol_config: request_responses::ProtocolConfig, // All remaining request protocol configs. mut request_response_protocols: Vec, + peerset: PeersetHandle, ) -> Result { // Extract protocol name and add to `request_response_protocols`. let block_request_protocol_name = block_request_protocol_config.name.to_string(); @@ -229,6 +231,7 @@ impl Behaviour { bitswap: bitswap.into(), request_responses: request_responses::RequestResponsesBehaviour::new( request_response_protocols.into_iter(), + peerset, )?, light_client_request_sender, events: VecDeque::new(), diff --git a/client/network/src/config.rs b/client/network/src/config.rs index d08e29ef8589f..76c806ccbf7b6 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -155,14 +155,14 @@ pub enum Role { } impl Role { - /// True for `Role::Authority` + /// True for [`Role::Authority`]. pub fn is_authority(&self) -> bool { - matches!(self, Role::Authority { .. }) + matches!(self, Self::Authority { .. }) } - /// True for `Role::Light` + /// True for [`Role::Light`]. pub fn is_light(&self) -> bool { - matches!(self, Role::Light { .. 
}) + matches!(self, Self::Light { .. }) } } @@ -329,7 +329,7 @@ impl FromStr for MultiaddrWithPeerId { fn from_str(s: &str) -> Result { let (peer_id, multiaddr) = parse_str_addr(s)?; - Ok(MultiaddrWithPeerId { peer_id, multiaddr }) + Ok(Self { peer_id, multiaddr }) } } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 71e46f73234c7..431de50c0f192 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -733,7 +733,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { let ev = DiscoveryOut::Discovered(peer); return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, - KademliaEvent::PendingRoutablePeer { .. } => { + KademliaEvent::PendingRoutablePeer { .. } | + KademliaEvent::InboundRequestServed { .. } => { // We are not interested in this event at the moment. }, KademliaEvent::OutboundQueryCompleted { @@ -844,8 +845,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { ), }, // We never start any other type of query. - e => { - debug!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) + KademliaEvent::OutboundQueryCompleted { result: e, .. } => { + warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, }, NetworkBehaviourAction::DialAddress { address } => diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 8938c27aeddd8..001f6cbd7e455 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -143,10 +143,10 @@ pub struct RemoteReadResponse { /// Announcement summary used for debug logging. 
#[derive(Debug)] pub struct AnnouncementSummary { - block_hash: H::Hash, - number: H::Number, - parent_hash: H::Hash, - state: Option, + pub block_hash: H::Hash, + pub number: H::Number, + pub parent_hash: H::Hash, + pub state: Option, } impl generic::BlockAnnounce { diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index da2967d6f26eb..01138e3207570 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -417,7 +417,7 @@ impl Notifications { /// Returns true if we have an open substream to the given peer. pub fn is_open(&self, peer_id: &PeerId, set_id: sc_peerset::SetId) -> bool { - self.peers.get(&(peer_id.clone(), set_id)).map(|p| p.is_open()).unwrap_or(false) + self.peers.get(&(*peer_id, set_id)).map(|p| p.is_open()).unwrap_or(false) } /// Disconnects the given peer if we are connected to it. @@ -1777,7 +1777,7 @@ impl NetworkBehaviour for Notifications { "Handler({}, {:?}) => CloseResult({:?})", source, connection, set_id); - match self.peers.get_mut(&(source.clone(), set_id)) { + match self.peers.get_mut(&(source, set_id)) { // Move the connection from `Closing` to `Closed`. Some(PeerState::Incoming { connections, .. }) | Some(PeerState::DisabledPendingEnable { connections, .. }) | diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 6ebc7416c2a35..0908d7510e359 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -64,6 +64,7 @@ use std::{ }; pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId}; +use sc_peerset::{PeersetHandle, BANNED_THRESHOLD}; /// Configuration for a single request-response protocol. 
#[derive(Debug, Clone)] @@ -256,6 +257,27 @@ pub struct RequestResponsesBehaviour { /// Whenever a response is received on `pending_responses`, insert a channel to be notified /// when the request has been sent out. send_feedback: HashMap>, + + /// Primarily used to get a reputation of a node. + peerset: PeersetHandle, + + /// Pending message request, holds `MessageRequest` as a Future state to poll it + /// until we get a response from `Peerset`. + message_request: Option, +} + +// This is the state of processing an incoming request message. +// The main reason for this struct is to hold `get_peer_reputation` as a Future state. +struct MessageRequest { + peer: PeerId, + request_id: RequestId, + request: Vec, + channel: ResponseChannel, ()>>, + protocol: String, + resp_builder: Option>, + // Once we get an incoming request we save all params, create an async call to Peerset + // to get the reputation of the peer. + get_peer_reputation: Pin> + Send>>, } /// Generated by the response builder and waiting to be processed. @@ -270,7 +292,10 @@ struct RequestProcessingOutcome { impl RequestResponsesBehaviour { /// Creates a new behaviour. Must be passed a list of supported protocols. Returns an error if /// the same protocol is passed twice.
- pub fn new(list: impl Iterator) -> Result { + pub fn new( + list: impl Iterator, + peerset: PeersetHandle, + ) -> Result { let mut protocols = HashMap::new(); for protocol in list { let mut cfg = RequestResponseConfig::default(); @@ -304,6 +329,8 @@ impl RequestResponsesBehaviour { pending_responses: Default::default(), pending_responses_arrival_time: Default::default(), send_feedback: Default::default(), + peerset, + message_request: None, }) } @@ -492,6 +519,93 @@ impl NetworkBehaviour for RequestResponsesBehaviour { >, > { 'poll_all: loop { + if let Some(message_request) = self.message_request.take() { + // Now we can poll `MessageRequest` until we get the reputation + + let MessageRequest { + peer, + request_id, + request, + channel, + protocol, + resp_builder, + mut get_peer_reputation, + } = message_request; + + let reputation = Future::poll(Pin::new(&mut get_peer_reputation), cx); + match reputation { + Poll::Pending => { + // Save the state to poll it again next time. + + self.message_request = Some(MessageRequest { + peer, + request_id, + request, + channel, + protocol, + resp_builder, + get_peer_reputation, + }); + return Poll::Pending + }, + Poll::Ready(reputation) => { + // Once we get the reputation we can continue processing the request. + + let reputation = reputation.expect( + "The channel can only be closed if the peerset no longer exists; qed", + ); + + if reputation < BANNED_THRESHOLD { + log::debug!( + target: "sub-libp2p", + "Cannot handle requests from a node with a low reputation {}: {}", + peer, + reputation, + ); + continue 'poll_all + } + + let (tx, rx) = oneshot::channel(); + + // Submit the request to the "response builder" passed by the user at + // initialization. + if let Some(mut resp_builder) = resp_builder { + // If the response builder is too busy, silently drop `tx`. This + // will be reported by the corresponding `RequestResponse` through + // an `InboundFailure::Omission` event.
+ let _ = resp_builder.try_send(IncomingRequest { + peer: peer.clone(), + payload: request, + pending_response: tx, + }); + } else { + debug_assert!(false, "Received message on outbound-only protocol."); + } + + let protocol = Cow::from(protocol); + self.pending_responses.push(Box::pin(async move { + // The `tx` created above can be dropped if we are not capable of + // processing this request, which is reflected as a + // `InboundFailure::Omission` event. + if let Ok(response) = rx.await { + Some(RequestProcessingOutcome { + peer, + request_id, + protocol, + inner_channel: channel, + response, + }) + } else { + None + } + })); + + // This `continue` makes sure that `pending_responses` gets polled + // after we have added the new element. + continue 'poll_all + }, + } + } // Poll to see if any response is ready to be sent back. while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { let RequestProcessingOutcome { @@ -585,42 +699,24 @@ impl NetworkBehaviour for RequestResponsesBehaviour { Instant::now(), ); - let (tx, rx) = oneshot::channel(); - - // Submit the request to the "response builder" passed by the user at - // initialization. - if let Some(resp_builder) = resp_builder { - // If the response builder is too busy, silently drop `tx`. This - // will be reported by the corresponding `RequestResponse` through - // an `InboundFailure::Omission` event. 
- let _ = resp_builder.try_send(IncomingRequest { - peer: peer.clone(), - payload: request, - pending_response: tx, - }); - } else { - debug_assert!(false, "Received message on outbound-only protocol."); - } + let get_peer_reputation = + self.peerset.clone().peer_reputation(peer.clone()); + let get_peer_reputation = Box::pin(get_peer_reputation); - let protocol = protocol.clone(); - self.pending_responses.push(Box::pin(async move { - // The `tx` created above can be dropped if we are not capable of - // processing this request, which is reflected as a - // `InboundFailure::Omission` event. - if let Ok(response) = rx.await { - Some(RequestProcessingOutcome { - peer, - request_id, - protocol, - inner_channel: channel, - response, - }) - } else { - None - } - })); - - // This `continue` makes sure that `pending_responses` gets polled + // Save the Future-like state with params to poll `get_peer_reputation` + // and to continue processing the request once we get the reputation of + // the peer. + self.message_request = Some(MessageRequest { + peer, + request_id, + request, + channel, + protocol: protocol.to_string(), + resp_builder: resp_builder.clone(), + get_peer_reputation, + }); + + // This `continue` makes sure that `message_request` gets polled // after we have added the new element. 
continue 'poll_all }, @@ -934,11 +1030,12 @@ mod tests { swarm::{Swarm, SwarmEvent}, Multiaddr, }; + use sc_peerset::{Peerset, PeersetConfig, SetConfig}; use std::{iter, time::Duration}; fn build_swarm( list: impl Iterator, - ) -> (Swarm, Multiaddr) { + ) -> (Swarm, Multiaddr, Peerset) { let keypair = Keypair::generate_ed25519(); let noise_keys = @@ -950,13 +1047,29 @@ mod tests { .multiplex(libp2p::yamux::YamuxConfig::default()) .boxed(); - let behaviour = RequestResponsesBehaviour::new(list).unwrap(); + let config = PeersetConfig { + sets: vec![SetConfig { + in_peers: u32::max_value(), + out_peers: u32::max_value(), + bootnodes: vec![], + reserved_nodes: Default::default(), + reserved_only: false, + }], + }; + + let (peerset, handle) = Peerset::from_config(config); + + let behaviour = RequestResponsesBehaviour::new(list, handle).unwrap(); let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); swarm.listen_on(listen_addr.clone()).unwrap(); - (swarm, listen_addr) + (swarm, listen_addr, peerset) + } + + async fn loop_peerset(peerset: Peerset) { + let _: Vec<_> = peerset.collect().await; } #[test] @@ -1007,10 +1120,12 @@ mod tests { Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); } + let (mut swarm, _, peerset) = swarms.remove(0); + // Process every peerset event in the background. + pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); // Running `swarm[0]` in the background. pool.spawner() .spawn_obj({ - let (mut swarm, _) = swarms.remove(0); async move { loop { match swarm.select_next_some().await { @@ -1027,7 +1142,9 @@ mod tests { .unwrap(); // Remove and run the remaining swarm. - let (mut swarm, _) = swarms.remove(0); + let (mut swarm, _, peerset) = swarms.remove(0); + // Process every peerset event in the background. 
+ pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); pool.run_until(async move { let mut response_receiver = None; @@ -1105,9 +1222,11 @@ mod tests { // Running `swarm[0]` in the background until a `InboundRequest` event happens, // which is a hint about the test having ended. + let (mut swarm, _, peerset) = swarms.remove(0); + // Process every peerset event in the background. + pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); pool.spawner() .spawn_obj({ - let (mut swarm, _) = swarms.remove(0); async move { loop { match swarm.select_next_some().await { @@ -1125,7 +1244,9 @@ mod tests { .unwrap(); // Remove and run the remaining swarm. - let (mut swarm, _) = swarms.remove(0); + let (mut swarm, _, peerset) = swarms.remove(0); + // Process every peerset event in the background. + pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); pool.run_until(async move { let mut response_receiver = None; @@ -1195,7 +1316,7 @@ mod tests { build_swarm(protocol_configs.into_iter()).0 }; - let (mut swarm_2, mut swarm_2_handler_1, mut swarm_2_handler_2, listen_add_2) = { + let (mut swarm_2, mut swarm_2_handler_1, mut swarm_2_handler_2, listen_add_2, peerset) = { let (tx_1, rx_1) = mpsc::channel(64); let (tx_2, rx_2) = mpsc::channel(64); @@ -1216,10 +1337,12 @@ mod tests { }, ]; - let (swarm, listen_addr) = build_swarm(protocol_configs.into_iter()); + let (swarm, listen_addr, peerset) = build_swarm(protocol_configs.into_iter()); - (swarm, rx_1, rx_2, listen_addr) + (swarm, rx_1, rx_2, listen_addr, peerset) }; + // Process every peerset event in the background. + pool.spawner().spawn_obj(loop_peerset(peerset).boxed().into()).unwrap(); // Ask swarm 1 to dial swarm 2. There isn't any discovery mechanism in place in this test, // so they wouldn't connect to each other. 
diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 525470145b78c..23f9c614d9069 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -355,6 +355,7 @@ impl NetworkWorker { bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, + peerset_handle.clone(), ); match result { diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 82e7e8fe1714c..9565466ea1c07 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -92,7 +92,7 @@ struct Metrics { impl Metrics { fn register(r: &Registry) -> Result { - Ok(Metrics { + Ok(Self { propagated_transactions: register( Counter::new( "sync_propagated_transactions", @@ -133,7 +133,7 @@ pub struct TransactionsHandlerPrototype { impl TransactionsHandlerPrototype { /// Create a new instance. pub fn new(protocol_id: ProtocolId) -> Self { - TransactionsHandlerPrototype { + Self { protocol_name: Cow::from({ let mut proto = String::new(); proto.push_str("/"); @@ -401,7 +401,7 @@ impl TransactionsHandler { let hash = self.transaction_pool.hash_of(&t); peer.known_transactions.insert(hash.clone()); - self.service.report_peer(who.clone(), rep::ANY_TRANSACTION); + self.service.report_peer(who, rep::ANY_TRANSACTION); match self.pending_transactions_peers.entry(hash.clone()) { Entry::Vacant(entry) => { @@ -409,10 +409,10 @@ impl TransactionsHandler { validation: self.transaction_pool.import(t), tx_hash: hash, }); - entry.insert(vec![who.clone()]); + entry.insert(vec![who]); }, Entry::Occupied(mut entry) => { - entry.get_mut().push(who.clone()); + entry.get_mut().push(who); }, } } @@ -468,11 +468,8 @@ impl TransactionsHandler { propagated_to.entry(hash).or_default().push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.service.write_notification( - who.clone(), - self.protocol_name.clone(), - to_send.encode(), - ); + 
self.service + .write_notification(*who, self.protocol_name.clone(), to_send.encode()); } } diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 04223c6d6846f..3f977a21b1165 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -58,20 +58,16 @@ pub fn build_transport( let desktop_trans = websocket::WsConfig::new(desktop_trans.clone()).or_transport(desktop_trans); let dns_init = futures::executor::block_on(dns::DnsConfig::system(desktop_trans.clone())); - OptionalTransport::some(if let Ok(dns) = dns_init { + EitherTransport::Left(if let Ok(dns) = dns_init { EitherTransport::Left(dns) } else { EitherTransport::Right(desktop_trans.map_err(dns::DnsErr::Transport)) }) } else { - // For the in-memory case we set up the transport with an `.or_transport` below. - OptionalTransport::none() + EitherTransport::Right(OptionalTransport::some( + libp2p::core::transport::MemoryTransport::default(), + )) }; - let transport = transport.or_transport(if memory_only { - OptionalTransport::some(libp2p::core::transport::MemoryTransport::default()) - } else { - OptionalTransport::none() - }); let (transport, bandwidth) = bandwidth::BandwidthLogging::new(transport); diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 88399ca54a436..b4c3a74607f65 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-std = "1.6.5" +async-std = "1.10.0" sc-network = { version = "0.10.0-dev", path = "../" } log = "0.4.8" parking_lot = "0.11.1" diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index c86ccfeac3ed1..f413b705e52c4 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -616,9 +616,10 @@ fn syncs_header_only_forks() { let small_hash = net.peer(0).client().info().best_hash; 
net.peer(1).push_blocks(4, false); - net.block_until_sync(); // Peer 1 will sync the small fork even though common block state is missing - assert!(net.peer(1).has_block(&small_hash)); + while !net.peer(1).has_block(&small_hash) { + net.block_until_idle(); + } } #[test] @@ -855,12 +856,19 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { net.block_until_idle(); // Connect another node that should now sync to the tip - net.add_full_peer_with_config(Default::default()); - net.block_until_connected(); + net.add_full_peer_with_config(FullPeerConfig { + connect_to_peers: Some(vec![0]), + ..Default::default() + }); - while !net.peer(2).has_block(&block_hash) { - net.block_until_idle(); - } + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(2).has_block(&block_hash) { + Poll::Ready(()) + } else { + Poll::Pending + } + })); // However peer 1 should still not have the block. assert!(!net.peer(1).has_block(&block_hash)); diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 5962620d6e06e..a7e9130cfff1c 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - [dependencies] futures = "0.3.9" libp2p = { version = "0.39.1", default-features = false } diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 9c6c5617c34b1..7fbda1ba7b7f8 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -34,7 +34,7 @@ mod peersstate; -use futures::prelude::*; +use futures::{channel::oneshot, prelude::*}; use log::{debug, error, trace}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use serde_json::json; @@ -49,7 +49,7 @@ use wasm_timer::Delay; pub use libp2p::PeerId; /// We don't accept nodes whose reputation is under this value. 
-const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); +pub const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); /// Reputation change for a node when we get disconnected from it. const DISCONNECT_REPUTATION_CHANGE: i32 = -256; /// Amount of time between the moment we disconnect from a node and the moment we remove it from @@ -65,6 +65,7 @@ enum Action { ReportPeer(PeerId, ReputationChange), AddToPeersSet(SetId, PeerId), RemoveFromPeersSet(SetId, PeerId), + PeerReputation(PeerId, oneshot::Sender), } /// Identifier of a set in the peerset. @@ -78,13 +79,13 @@ pub struct SetId(usize); impl SetId { pub const fn from(id: usize) -> Self { - SetId(id) + Self(id) } } impl From for SetId { fn from(id: usize) -> Self { - SetId(id) + Self(id) } } @@ -106,12 +107,12 @@ pub struct ReputationChange { impl ReputationChange { /// New reputation change with given delta and reason. pub const fn new(value: i32, reason: &'static str) -> ReputationChange { - ReputationChange { value, reason } + Self { value, reason } } /// New reputation change that forces minimum possible reputation. pub const fn new_fatal(reason: &'static str) -> ReputationChange { - ReputationChange { value: i32::MIN, reason } + Self { value: i32::MIN, reason } } } @@ -165,6 +166,16 @@ impl PeersetHandle { pub fn remove_from_peers_set(&self, set_id: SetId, peer_id: PeerId) { let _ = self.tx.unbounded_send(Action::RemoveFromPeersSet(set_id, peer_id)); } + + /// Returns the reputation value of the peer. + pub async fn peer_reputation(self, peer_id: PeerId) -> Result { + let (tx, rx) = oneshot::channel(); + + let _ = self.tx.unbounded_send(Action::PeerReputation(peer_id, tx)); + + // The channel can only be closed if the peerset no longer exists. + rx.await.map_err(|_| ()) + } } /// Message that can be sent by the peer set manager (PSM). 
@@ -197,8 +208,8 @@ pub enum Message { pub struct IncomingIndex(pub u64); impl From for IncomingIndex { - fn from(val: u64) -> IncomingIndex { - IncomingIndex(val) + fn from(val: u64) -> Self { + Self(val) } } @@ -263,7 +274,7 @@ pub struct Peerset { impl Peerset { /// Builds a new peerset from the given configuration. - pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) { + pub fn from_config(config: PeersetConfig) -> (Self, PeersetHandle) { let (tx, rx) = tracing_unbounded("mpsc_peerset_messages"); let handle = PeersetHandle { tx: tx.clone() }; @@ -271,7 +282,7 @@ impl Peerset { let mut peerset = { let now = Instant::now(); - Peerset { + Self { data: peersstate::PeersState::new(config.sets.iter().map(|set| { peersstate::SetConfig { in_peers: set.in_peers, out_peers: set.out_peers } })), @@ -311,7 +322,7 @@ impl Peerset { } fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { - let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id.clone()); + let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id); if !newly_inserted { return } @@ -411,8 +422,7 @@ impl Peerset { match self.data.peer(set_id.0, &peer_id) { peersstate::Peer::Connected(peer) => { - self.message_queue - .push_back(Message::Drop { set_id, peer_id: peer.peer_id().clone() }); + self.message_queue.push_back(Message::Drop { set_id, peer_id: *peer.peer_id() }); peer.disconnect().forget_peer(); }, peersstate::Peer::NotConnected(peer) => { @@ -454,6 +464,11 @@ impl Peerset { } } + fn on_peer_reputation(&mut self, peer_id: PeerId, pending_response: oneshot::Sender) { + let reputation = self.data.peer_reputation(peer_id); + let _ = pending_response.send(reputation.reputation()); + } + /// Updates the value of `self.latest_time_update` and performs all the updates that happen /// over time, such as reputation increases for staying connected. 
fn update_time(&mut self) { @@ -744,6 +759,8 @@ impl Stream for Peerset { self.add_to_peers_set(sets_name, peer_id), Action::RemoveFromPeersSet(sets_name, peer_id) => self.on_remove_from_peers_set(sets_name, peer_id), + Action::PeerReputation(peer_id, pending_response) => + self.on_peer_reputation(peer_id, pending_response), } } } @@ -801,8 +818,8 @@ mod tests { }; let (peerset, handle) = Peerset::from_config(config); - handle.add_reserved_peer(SetId::from(0), reserved_peer.clone()); - handle.add_reserved_peer(SetId::from(0), reserved_peer2.clone()); + handle.add_reserved_peer(SetId::from(0), reserved_peer); + handle.add_reserved_peer(SetId::from(0), reserved_peer2); assert_messages( peerset, @@ -827,22 +844,22 @@ mod tests { sets: vec![SetConfig { in_peers: 2, out_peers: 1, - bootnodes: vec![bootnode.clone()], + bootnodes: vec![bootnode], reserved_nodes: Default::default(), reserved_only: false, }], }; let (mut peerset, _handle) = Peerset::from_config(config); - peerset.incoming(SetId::from(0), incoming.clone(), ii); - peerset.incoming(SetId::from(0), incoming.clone(), ii4); - peerset.incoming(SetId::from(0), incoming2.clone(), ii2); - peerset.incoming(SetId::from(0), incoming3.clone(), ii3); + peerset.incoming(SetId::from(0), incoming, ii); + peerset.incoming(SetId::from(0), incoming, ii4); + peerset.incoming(SetId::from(0), incoming2, ii2); + peerset.incoming(SetId::from(0), incoming3, ii3); assert_messages( peerset, vec![ - Message::Connect { set_id: SetId::from(0), peer_id: bootnode.clone() }, + Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, Message::Accept(ii), Message::Accept(ii2), Message::Reject(ii3), @@ -865,7 +882,7 @@ mod tests { }; let (mut peerset, _) = Peerset::from_config(config); - peerset.incoming(SetId::from(0), incoming.clone(), ii); + peerset.incoming(SetId::from(0), incoming, ii); assert_messages(peerset, vec![Message::Reject(ii)]); } @@ -879,15 +896,15 @@ mod tests { sets: vec![SetConfig { in_peers: 0, out_peers: 2, - 
bootnodes: vec![bootnode.clone()], + bootnodes: vec![bootnode], reserved_nodes: Default::default(), reserved_only: false, }], }; let (mut peerset, _handle) = Peerset::from_config(config); - peerset.add_to_peers_set(SetId::from(0), discovered.clone()); - peerset.add_to_peers_set(SetId::from(0), discovered.clone()); + peerset.add_to_peers_set(SetId::from(0), discovered); + peerset.add_to_peers_set(SetId::from(0), discovered); peerset.add_to_peers_set(SetId::from(0), discovered2); assert_messages( @@ -913,7 +930,7 @@ mod tests { // We ban a node by setting its reputation under the threshold. let peer_id = PeerId::random(); - handle.report_peer(peer_id.clone(), ReputationChange::new(BANNED_THRESHOLD - 1, "")); + handle.report_peer(peer_id, ReputationChange::new(BANNED_THRESHOLD - 1, "")); let fut = futures::future::poll_fn(move |cx| { // We need one polling for the message to be processed. @@ -956,7 +973,7 @@ mod tests { // We ban a node by setting its reputation under the threshold. let peer_id = PeerId::random(); - handle.report_peer(peer_id.clone(), ReputationChange::new(BANNED_THRESHOLD - 1, "")); + handle.report_peer(peer_id, ReputationChange::new(BANNED_THRESHOLD - 1, "")); let fut = futures::future::poll_fn(move |cx| { // We need one polling for the message to be processed. diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 7717620eae3a7..d7a9ef9135876 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -105,8 +105,8 @@ struct Node { } impl Node { - fn new(num_sets: usize) -> Node { - Node { sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0 } + fn new(num_sets: usize) -> Self { + Self { sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0 } } } @@ -128,21 +128,24 @@ enum MembershipState { } impl MembershipState { - /// Returns `true` for `In` and `Out`. + /// Returns `true` for [`MembershipState::In`] and [`MembershipState::Out`]. 
fn is_connected(self) -> bool { match self { - MembershipState::NotMember => false, - MembershipState::In => true, - MembershipState::Out => true, - MembershipState::NotConnected { .. } => false, + Self::In | Self::Out => true, + Self::NotMember | Self::NotConnected { .. } => false, } } + + /// Returns `true` for [`MembershipState::NotConnected`]. + fn is_not_connected(self) -> bool { + matches!(self, Self::NotConnected { .. }) + } } impl PeersState { - /// Builds a new empty `PeersState`. + /// Builds a new empty [`PeersState`]. pub fn new(sets: impl IntoIterator) -> Self { - PeersState { + Self { nodes: HashMap::new(), sets: sets .into_iter() @@ -242,12 +245,7 @@ impl PeersState { let outcome = self .nodes .iter_mut() - .filter(|(_, Node { sets, .. })| match sets[set] { - MembershipState::NotMember => false, - MembershipState::In => false, - MembershipState::Out => false, - MembershipState::NotConnected { .. } => true, - }) + .filter(|(_, Node { sets, .. })| sets[set].is_not_connected()) .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { if let Some(cur_node) = cur_node.take() { if cur_node.1.reputation >= to_try.1.reputation { @@ -318,35 +316,32 @@ pub enum Peer<'a> { } impl<'a> Peer<'a> { - /// If we are the `Connected` variant, returns the inner `ConnectedPeer`. Returns `None` + /// If we are the `Connected` variant, returns the inner [`ConnectedPeer`]. Returns `None` /// otherwise. pub fn into_connected(self) -> Option> { match self { - Peer::Connected(peer) => Some(peer), - Peer::NotConnected(_) => None, - Peer::Unknown(_) => None, + Self::Connected(peer) => Some(peer), + Self::NotConnected(..) | Self::Unknown(..) => None, } } - /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` + /// If we are the `NotConnected` variant, returns the inner [`NotConnectedPeer`]. Returns `None` /// otherwise. 
#[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_not_connected(self) -> Option> { match self { - Peer::Connected(_) => None, - Peer::NotConnected(peer) => Some(peer), - Peer::Unknown(_) => None, + Self::NotConnected(peer) => Some(peer), + Self::Connected(..) | Self::Unknown(..) => None, } } - /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` + /// If we are the `Unknown` variant, returns the inner [`UnknownPeer`]. Returns `None` /// otherwise. #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_unknown(self) -> Option> { match self { - Peer::Connected(_) => None, - Peer::NotConnected(_) => None, - Peer::Unknown(peer) => Some(peer), + Self::Unknown(peer) => Some(peer), + Self::Connected(..) | Self::NotConnected(..) => None, } } } @@ -473,7 +468,7 @@ impl<'a> NotConnectedPeer<'a> { /// the slots are full, the node stays "not connected" and we return `Err`. /// /// Non-slot-occupying nodes don't count towards the number of slots. - pub fn try_outgoing(self) -> Result, NotConnectedPeer<'a>> { + pub fn try_outgoing(self) -> Result, Self> { let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_out to be strictly superior to the max, in case we were @@ -500,7 +495,7 @@ impl<'a> NotConnectedPeer<'a> { /// the slots are full, the node stays "not connected" and we return `Err`. /// /// Non-slot-occupying nodes don't count towards the number of slots. 
- pub fn try_accept_incoming(self) -> Result, NotConnectedPeer<'a>> { + pub fn try_accept_incoming(self) -> Result, Self> { let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_in to be strictly superior to the max, in case we were diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 5120cc8f4dfaa..735f215c82b36 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -86,4 +86,4 @@ directories = "3.0.2" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime/" } -async-std = { version = "1.6.5", default-features = false } +async-std = { version = "1.10.0", default-features = false } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 9b8774ce6d497..d7a8b6f227e8f 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -41,7 +41,7 @@ use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; pub struct LocalCallExecutor { backend: Arc, executor: E, - wasm_override: Option>, + wasm_override: Option, wasm_substitutes: WasmSubstitutes, spawn_handle: Box, client_config: ClientConfig, @@ -62,7 +62,7 @@ where let wasm_override = client_config .wasm_runtime_overrides .as_ref() - .map(|p| WasmOverride::new(p.clone(), executor.clone())) + .map(|p| WasmOverride::new(p.clone(), &executor)) .transpose()?; let wasm_substitutes = WasmSubstitutes::new( @@ -371,7 +371,7 @@ mod tests { 1, ); - let overrides = crate::client::wasm_override::dummy_overrides(&executor); + let overrides = crate::client::wasm_override::dummy_overrides(); let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); let onchain_code = RuntimeCode { code_fetcher: &onchain_code, diff --git 
a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 6d5a071269d4d..3d28467a9cbd9 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -104,22 +104,19 @@ impl From for sp_blockchain::Error { /// Scrapes WASM from a folder and returns WASM from that folder /// if the runtime spec version matches. #[derive(Clone, Debug)] -pub struct WasmOverride { +pub struct WasmOverride { // Map of runtime spec version -> Wasm Blob overrides: HashMap, - executor: E, } -impl WasmOverride -where - E: RuntimeVersionOf + Clone + 'static, -{ - pub fn new

(path: P, executor: E) -> Result +impl WasmOverride { + pub fn new(path: P, executor: &E) -> Result where P: AsRef, + E: RuntimeVersionOf, { - let overrides = Self::scrape_overrides(path.as_ref(), &executor)?; - Ok(Self { overrides, executor }) + let overrides = Self::scrape_overrides(path.as_ref(), executor)?; + Ok(Self { overrides }) } /// Gets an override by it's runtime spec version. @@ -131,7 +128,10 @@ where /// Scrapes a folder for WASM runtimes. /// Returns a hashmap of the runtime version and wasm runtime code. - fn scrape_overrides(dir: &Path, executor: &E) -> Result> { + fn scrape_overrides(dir: &Path, executor: &E) -> Result> + where + E: RuntimeVersionOf, + { let handle_err = |e: std::io::Error| -> sp_blockchain::Error { WasmOverrideError::Io(dir.to_owned(), e).into() }; @@ -176,11 +176,14 @@ where Ok(overrides) } - fn runtime_version( + fn runtime_version( executor: &E, code: &WasmBlob, heap_pages: Option, - ) -> Result { + ) -> Result + where + E: RuntimeVersionOf, + { let mut ext = BasicExternalities::default(); executor .runtime_version(&mut ext, &code.runtime_code(heap_pages)) @@ -190,15 +193,12 @@ where /// Returns a WasmOverride struct filled with dummy data for testing. 
#[cfg(test)] -pub fn dummy_overrides(executor: &E) -> WasmOverride -where - E: RuntimeVersionOf + Clone + 'static, -{ +pub fn dummy_overrides() -> WasmOverride { let mut overrides = HashMap::new(); overrides.insert(0, WasmBlob::new(vec![0, 0, 0, 0, 0, 0, 0, 0])); overrides.insert(1, WasmBlob::new(vec![1, 1, 1, 1, 1, 1, 1, 1])); overrides.insert(2, WasmBlob::new(vec![2, 2, 2, 2, 2, 2, 2, 2])); - WasmOverride { overrides, executor: executor.clone() } + WasmOverride { overrides } } #[cfg(test)] diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 295e941f7ceb1..d82a839936d7d 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -481,25 +481,7 @@ fn best_containing_with_genesis_block() { assert_eq!( genesis_hash.clone(), - block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)) - .unwrap() - .unwrap(), - ); -} - -#[test] -fn best_containing_with_hash_not_found() { - // block tree: - // G - - let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; - - assert_eq!( - None, - block_on(longest_chain_select.finality_target(uninserted_block.hash().clone(), None)) - .unwrap(), + block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)).unwrap(), ); } @@ -675,22 +657,10 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { assert_eq!( a2.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), None)) - .unwrap() - .unwrap() + block_on(longest_chain_select.finality_target(genesis_hash, None)).unwrap() ); + assert_eq!(a2.hash(), 
block_on(longest_chain_select.finality_target(a1.hash(), None)).unwrap()); + assert_eq!(a2.hash(), block_on(longest_chain_select.finality_target(a2.hash(), None)).unwrap()); } #[test] @@ -819,343 +789,101 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert!(leaves.contains(&d2.hash())); assert_eq!(leaves.len(), 4); + let finality_target = |target_hash, number| { + block_on(longest_chain_select.finality_target(target_hash, number)).unwrap() + }; + // search without restriction - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a4.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a5.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b4.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), None)) - .unwrap() - .unwrap() - ); + assert_eq!(a5.hash(), finality_target(genesis_hash, None)); + assert_eq!(a5.hash(), finality_target(a1.hash(), None)); + assert_eq!(a5.hash(), 
finality_target(a2.hash(), None)); + assert_eq!(a5.hash(), finality_target(a3.hash(), None)); + assert_eq!(a5.hash(), finality_target(a4.hash(), None)); + assert_eq!(a5.hash(), finality_target(a5.hash(), None)); + assert_eq!(b4.hash(), finality_target(b2.hash(), None)); + assert_eq!(b4.hash(), finality_target(b3.hash(), None)); + assert_eq!(b4.hash(), finality_target(b4.hash(), None)); + assert_eq!(c3.hash(), finality_target(c3.hash(), None)); + assert_eq!(d2.hash(), finality_target(d2.hash(), None)); // search only blocks with number <= 5. equivalent to without restriction for this scenario - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a4.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a5.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b4.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), Some(5))) - .unwrap() - .unwrap() - ); + 
assert_eq!(a5.hash(), finality_target(genesis_hash, Some(5))); + assert_eq!(a5.hash(), finality_target(a1.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a2.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a3.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a4.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(5))); + assert_eq!(b4.hash(), finality_target(b2.hash(), Some(5))); + assert_eq!(b4.hash(), finality_target(b3.hash(), Some(5))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(5))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(5))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(5))); // search only blocks with number <= 4 - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a4.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(4))).unwrap()); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b4.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - d2.hash(), - 
block_on(longest_chain_select.finality_target(d2.hash(), Some(4))) - .unwrap() - .unwrap() - ); + assert_eq!(a4.hash(), finality_target(genesis_hash, Some(4))); + assert_eq!(a4.hash(), finality_target(a1.hash(), Some(4))); + assert_eq!(a4.hash(), finality_target(a2.hash(), Some(4))); + assert_eq!(a4.hash(), finality_target(a3.hash(), Some(4))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(4))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(4))); + assert_eq!(b4.hash(), finality_target(b2.hash(), Some(4))); + assert_eq!(b4.hash(), finality_target(b3.hash(), Some(4))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(4))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(4))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(4))); // search only blocks with number <= 3 - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(3))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(3))).unwrap()); - assert_eq!( - b3.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - b3.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(3))).unwrap()); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), Some(3))) - .unwrap() - .unwrap() - ); - 
assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), Some(3))) - .unwrap() - .unwrap() - ); + assert_eq!(a3.hash(), finality_target(genesis_hash, Some(3))); + assert_eq!(a3.hash(), finality_target(a1.hash(), Some(3))); + assert_eq!(a3.hash(), finality_target(a2.hash(), Some(3))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(3))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(3))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(3))); + assert_eq!(b3.hash(), finality_target(b2.hash(), Some(3))); + assert_eq!(b3.hash(), finality_target(b3.hash(), Some(3))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(3))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(3))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(3))); // search only blocks with number <= 2 - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(2))).unwrap()); - assert_eq!( - b2.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(2))).unwrap()); - assert_eq!( - d2.hash(), - 
block_on(longest_chain_select.finality_target(d2.hash(), Some(2))) - .unwrap() - .unwrap() - ); + assert_eq!(a2.hash(), finality_target(genesis_hash, Some(2))); + assert_eq!(a2.hash(), finality_target(a1.hash(), Some(2))); + assert_eq!(a2.hash(), finality_target(a2.hash(), Some(2))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(2))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(2))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(2))); + assert_eq!(b2.hash(), finality_target(b2.hash(), Some(2))); + assert_eq!(b3.hash(), finality_target(b3.hash(), Some(2))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(2))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(2))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(2))); // search only blocks with number <= 1 - assert_eq!( - a1.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(1))) - .unwrap() - .unwrap() - ); - assert_eq!( - a1.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(1))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(1))).unwrap()); - - assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(d2.hash(), Some(1))).unwrap()); + assert_eq!(a1.hash(), 
finality_target(genesis_hash, Some(1))); + assert_eq!(a1.hash(), finality_target(a1.hash(), Some(1))); + assert_eq!(a2.hash(), finality_target(a2.hash(), Some(1))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(1))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(1))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(1))); + + assert_eq!(b2.hash(), finality_target(b2.hash(), Some(1))); + assert_eq!(b3.hash(), finality_target(b3.hash(), Some(1))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(1))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(1))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(1))); // search only blocks with number <= 0 - assert_eq!( - genesis_hash, - block_on(longest_chain_select.finality_target(genesis_hash, Some(0))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a1.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(0))).unwrap()); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(c3.hash().clone(), Some(0))).unwrap(), - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(d2.hash().clone(), Some(0))).unwrap(), - ); + assert_eq!(genesis_hash, finality_target(genesis_hash, Some(0))); + assert_eq!(a1.hash(), finality_target(a1.hash(), Some(0))); + assert_eq!(a2.hash(), 
finality_target(a2.hash(), Some(0))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(0))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(0))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(0))); + assert_eq!(b2.hash(), finality_target(b2.hash(), Some(0))); + assert_eq!(b3.hash(), finality_target(b3.hash(), Some(0))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(0))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(0))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(0))); } #[test] @@ -1177,9 +905,7 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { assert_eq!( a2.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(10))) - .unwrap() - .unwrap(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(10))).unwrap(), ); } @@ -2085,12 +1811,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. 
- let mut client = new_in_mem::< - _, - substrate_test_runtime_client::runtime::Block, - _, - substrate_test_runtime_client::runtime::RuntimeApi, - >( + let mut client = new_in_mem::<_, Block, _, RuntimeApi>( substrate_test_runtime_client::new_native_executor(), &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), None, @@ -2108,8 +1829,8 @@ fn cleans_up_closed_notification_sinks_on_block_import() { in_mem::Backend, sc_executor::NativeElseWasmExecutor, >, - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::RuntimeApi, + Block, + RuntimeApi, >; let import_notif1 = client.import_notification_stream(); diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index a6252f1373c5d..cd8784bfc83e2 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -223,13 +223,14 @@ pub trait TransactionPool: Send + Sync { at: NumberFor, ) -> Pin< Box< - dyn Future> + Send>> - + Send, + dyn Future< + Output = Box> + Send>, + > + Send, >, >; /// Get an iterator for ready transactions ordered by priority. - fn ready(&self) -> Box> + Send>; + fn ready(&self) -> Box> + Send>; // *** Block production /// Remove transactions identified by given hashes (and dependent transactions) from the pool. @@ -254,6 +255,27 @@ pub trait TransactionPool: Send + Sync { fn ready_transaction(&self, hash: &TxHash) -> Option>; } +/// An iterator of ready transactions. +/// +/// The trait extends regular [`std::iter::Iterator`] trait and allows reporting +/// last-returned element as invalid. +/// +/// The implementation is then allowed, for performance reasons, to change the elements +/// returned next, by e.g. skipping elements that are known to depend on the reported +/// transaction, which yields them invalid as well. +pub trait ReadyTransactions: Iterator { + /// Report given transaction as invalid. 
+ /// + /// This might affect subsequent elements returned by the iterator, so dependent transactions + /// are skipped for performance reasons. + fn report_invalid(&mut self, _tx: &Self::Item); +} + +/// A no-op implementation for an empty iterator. +impl ReadyTransactions for std::iter::Empty { + fn report_invalid(&mut self, _tx: &T) {} +} + /// Events that the transaction pool listens for. pub enum ChainEvent { /// New best block have been added to the chain diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml deleted file mode 100644 index b49cadc51c33c..0000000000000 --- a/client/transaction-pool/graph/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "sc-transaction-graph" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Generic Transaction Pool" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -derive_more = "0.99.2" -thiserror = "1.0.21" -futures = "0.3.9" -log = "0.4.8" -parking_lot = "0.11.1" -serde = { version = "1.0.101", features = ["derive"] } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sc-utils = { version = "4.0.0-dev", path = "../../utils" } -sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } -sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "4.0.0-dev", path = "../../../primitives/transaction-pool" } -parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } -linked-hash-map = "0.5.4" -retain_mut = "0.1.3" - -[dev-dependencies] -assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "2.0.0" } -substrate-test-runtime = { version = "2.0.0", path = 
"../../../test-utils/runtime" } -criterion = "0.3" - -[[bench]] -name = "basics" -harness = false diff --git a/client/transaction-pool/src/graph/base_pool.rs b/client/transaction-pool/src/graph/base_pool.rs index 890a87e82929d..2c8becdfb2f0b 100644 --- a/client/transaction-pool/src/graph/base_pool.rs +++ b/client/transaction-pool/src/graph/base_pool.rs @@ -36,7 +36,7 @@ use sp_runtime::{ use super::{ future::{FutureTransactions, WaitingTransaction}, - ready::ReadyTransactions, + ready::{BestIterator, ReadyTransactions}, }; /// Successful import result. @@ -355,7 +355,7 @@ impl BasePool impl Iterator>> { + pub fn ready(&self) -> BestIterator { self.ready.get() } diff --git a/client/transaction-pool/src/graph/ready.rs b/client/transaction-pool/src/graph/ready.rs index 03689aeb32e6d..99a034689ccd0 100644 --- a/client/transaction-pool/src/graph/ready.rs +++ b/client/transaction-pool/src/graph/ready.rs @@ -23,7 +23,7 @@ use std::{ sync::Arc, }; -use log::trace; +use log::{debug, trace}; use sc_transaction_pool_api::error; use serde::Serialize; use sp_runtime::{traits::Member, transaction_validity::TransactionTag as Tag}; @@ -156,11 +156,16 @@ impl ReadyTransactions { /// - transactions that are valid for a shorter time go first /// 4. Lastly we sort by the time in the queue /// - transactions that are longer in the queue go first - pub fn get(&self) -> impl Iterator>> { + /// + /// The iterator is providing a way to report transactions that the receiver considers invalid. + /// In such case the entire subgraph of transactions that depend on the reported one will be + /// skipped. 
+ pub fn get(&self) -> BestIterator { BestIterator { all: self.ready.clone(), best: self.best.clone(), awaiting: Default::default(), + invalid: Default::default(), } } @@ -482,6 +487,7 @@ pub struct BestIterator { all: ReadOnlyTrackedMap>, awaiting: HashMap)>, best: BTreeSet>, + invalid: HashSet, } impl BestIterator { @@ -498,6 +504,34 @@ impl BestIterator { } } +impl sc_transaction_pool_api::ReadyTransactions + for BestIterator +{ + fn report_invalid(&mut self, tx: &Self::Item) { + BestIterator::report_invalid(self, tx) + } +} + +impl BestIterator { + /// Report given transaction as invalid. + /// + /// As a consequence, all values that depend on the invalid one will be skipped. + /// When given transaction is not in the pool it has no effect. + /// When invoked on a fully drained iterator it has no effect either. + pub fn report_invalid(&mut self, tx: &Arc>) { + if let Some(to_report) = self.all.read().get(&tx.hash) { + debug!( + target: "txpool", + "[{:?}] Reported as invalid. Will skip sub-chains while iterating.", + to_report.transaction.transaction.hash + ); + for hash in &to_report.unlocks { + self.invalid.insert(hash.clone()); + } + } + } +} + impl Iterator for BestIterator { type Item = Arc>; @@ -505,8 +539,19 @@ impl Iterator for BestIterator { loop { let best = self.best.iter().next_back()?.clone(); let best = self.best.take(&best)?; + let hash = &best.transaction.hash; + + // Check if the transaction was marked invalid. + if self.invalid.contains(hash) { + debug!( + target: "txpool", + "[{:?}] Skipping invalid child transaction while iterating.", + hash + ); + continue + } - let next = self.all.read().get(&best.transaction.hash).cloned(); + let next = self.all.read().get(hash).cloned(); let ready = match next { Some(ready) => ready, // The transaction is not in all, maybe it was removed in the meantime? 
@@ -635,10 +680,13 @@ mod tests { assert_eq!(ready.get().count(), 3); } - #[test] - fn should_return_best_transactions_in_correct_order() { - // given - let mut ready = ReadyTransactions::default(); + /// Populate the pool, with a graph that looks like so: + /// + /// tx1 -> tx2 \ + /// -> -> tx3 + /// -> tx4 -> tx5 -> tx6 + /// -> tx7 + fn populate_pool(ready: &mut ReadyTransactions>) { let mut tx1 = tx(1); tx1.requires.clear(); let mut tx2 = tx(2); @@ -649,11 +697,17 @@ mod tests { tx3.provides = vec![]; let mut tx4 = tx(4); tx4.requires = vec![tx1.provides[0].clone()]; - tx4.provides = vec![]; - let tx5 = Transaction { - data: vec![5], + tx4.provides = vec![vec![107]]; + let mut tx5 = tx(5); + tx5.requires = vec![tx4.provides[0].clone()]; + tx5.provides = vec![vec![108]]; + let mut tx6 = tx(6); + tx6.requires = vec![tx5.provides[0].clone()]; + tx6.provides = vec![]; + let tx7 = Transaction { + data: vec![7], bytes: 1, - hash: 5, + hash: 7, priority: 1, valid_till: u64::MAX, // use the max here for testing. 
requires: vec![tx1.provides[0].clone()], @@ -663,20 +717,30 @@ mod tests { }; // when - for tx in vec![tx1, tx2, tx3, tx4, tx5] { - import(&mut ready, tx).unwrap(); + for tx in vec![tx1, tx2, tx3, tx7, tx4, tx5, tx6] { + import(ready, tx).unwrap(); } - // then assert_eq!(ready.best.len(), 1); + } + + #[test] + fn should_return_best_transactions_in_correct_order() { + // given + let mut ready = ReadyTransactions::default(); + populate_pool(&mut ready); + // when let mut it = ready.get().map(|tx| tx.data[0]); + // then assert_eq!(it.next(), Some(1)); assert_eq!(it.next(), Some(2)); assert_eq!(it.next(), Some(3)); assert_eq!(it.next(), Some(4)); assert_eq!(it.next(), Some(5)); + assert_eq!(it.next(), Some(6)); + assert_eq!(it.next(), Some(7)); assert_eq!(it.next(), None); } @@ -725,4 +789,26 @@ mod tests { TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } ); } + + #[test] + fn should_skip_invalid_transactions_while_iterating() { + // given + let mut ready = ReadyTransactions::default(); + populate_pool(&mut ready); + + // when + let mut it = ready.get(); + let data = |tx: &Arc>>| tx.data[0]; + + // then + assert_eq!(it.next().as_ref().map(data), Some(1)); + assert_eq!(it.next().as_ref().map(data), Some(2)); + assert_eq!(it.next().as_ref().map(data), Some(3)); + let tx4 = it.next(); + assert_eq!(tx4.as_ref().map(data), Some(4)); + // report 4 as invalid, which should skip 5 & 6. 
+ it.report_invalid(&tx4.unwrap()); + assert_eq!(it.next().as_ref().map(data), Some(7)); + assert_eq!(it.next().as_ref().map(data), None); + } } diff --git a/client/transaction-pool/src/graph/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs index e4aad7f342b5b..dba586adc846c 100644 --- a/client/transaction-pool/src/graph/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -25,7 +25,7 @@ use std::{ use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; use retain_mut::RetainMut; -use sc_transaction_pool_api::{error, PoolStatus}; +use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; use serde::Serialize; use sp_runtime::{ generic::BlockId, @@ -630,7 +630,7 @@ impl ValidatedPool { } /// Get an iterator for ready transactions ordered by priority - pub fn ready(&self) -> impl Iterator> + Send { + pub fn ready(&self) -> impl ReadyTransactions> + Send { self.pool.read().ready() } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 6eb5bd2f332ec..4d355df22d821 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -56,7 +56,8 @@ use std::{ use graph::{ExtrinsicHash, IsValidator}; use sc_transaction_pool_api::{ ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolFuture, PoolStatus, - TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + ReadyTransactions, TransactionFor, TransactionPool, TransactionSource, + TransactionStatusStreamFor, TxHash, }; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ @@ -69,7 +70,7 @@ use crate::metrics::MetricsLink as PrometheusMetrics; use prometheus_endpoint::Registry as PrometheusRegistry; type BoxedReadyIterator = - Box>> + Send>; + Box>> + Send>; type ReadyIteratorFor = BoxedReadyIterator, graph::ExtrinsicFor>; diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 42d25a0a228f7..ee6382b72f1b2 100644 --- 
a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -40,6 +40,12 @@ /client/consensus/pow/ @sorpaas /primitives/consensus/pow/ @sorpaas +# BEEFY +/client/beefy/ @adoerr +/frame/beefy/ @adoerr +/frame/beefy-mmr/ @adoerr +/primitives/beefy/ @adoerr + # Contracts /frame/contracts/ @athei diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index b0eaec04455e4..0a9a7ebacff5b 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -42,7 +42,7 @@ A Pull Request (PR) needs to be reviewed and approved by project maintainers unl . PRs must be tagged with their release importance via the `C1-C9` labels. . PRs must be tagged with their audit requirements via the `D1-D9` labels. . PRs that must be backported to a stable branch must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E0-patchthis`]. -. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E1-runtimemigration`]. +. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E1-runtimemigration`]. See the https://github.com/paritytech/substrate/blob/master/utils/frame/try-runtime/cli/src/lib.rs#L18[Migration Best Practices here] for more info about how to test runtime migrations. . PRs that introduce irreversible database migrations must be tagged with https://github.com/paritytech/substrate/labels/E2-databasemigration[`E2-databasemigration`]. . PRs that add host functions must be tagged with with https://github.com/paritytech/substrate/labels/E4-newhostfunctions[`E4-newhostfunctions`]. . PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/E5-breaksapi[`E5-breaksapi`]. @@ -88,8 +88,7 @@ To create a Polkadot companion PR: - The bot will push a commit to the Polkadot PR updating its Substrate reference. 
- If the polkadot PR origins from a fork then a project member may need to press `approve run` on the polkadot PR. - The bot will merge the Polkadot PR once all its CI `{"build_allow_failure":false}` checks are green. - - Note: The merge-bot currently doesn't work with forks on org accounts, only individual accounts. + Note: The merge-bot currently doesn't work with forks on org accounts, only individual accounts. If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/A7-needspolkadotpr[`A7-needspolkadotpr`] to prevent it from getting automatically merged. diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/PULL_REQUEST_TEMPLATE.md index 77f5f79f60d40..12f39371892e7 100644 --- a/docs/PULL_REQUEST_TEMPLATE.md +++ b/docs/PULL_REQUEST_TEMPLATE.md @@ -1,26 +1,32 @@ -Thank you for your Pull Request! -Before you submitting, please check that: -- [ ] You added a brief description of the PR, e.g.: +✄ ----------------------------------------------------------------------------- + +Thank you for your Pull Request! 🙏 + +Before you submit, please check that: + +- [ ] **Description:** You added a brief description of the PR, e.g.: - What does it do? - - What important points reviewers should know? + - What important points should reviewers know? - Is there something left for follow-up PRs? -- [ ] You labeled the PR appropriately if you have permissions to do so: +- [ ] **Labels:** You labeled the PR appropriately if you have permissions to do so: - [ ] `A*` for PR status (**one required**) - [ ] `B*` for changelog (**one required**) - [ ] `C*` for release notes (**exactly one required**) - [ ] `D*` for various implications/requirements - - [ ] Github's project assignment -- [ ] You mentioned a related issue if this PR related to it, e.g. `Fixes #228` or `Related #1337`. -- [ ] You asked any particular reviewers to review. If you aren't sure, start with GH suggestions. 
-- [ ] Your PR adheres to [the style guide](https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md) + - [ ] Github project assignment +- [ ] **Related Issues:** You mentioned a related issue if this PR is related to it, e.g. `Fixes #228` or `Related #1337`. +- [ ] **2 Reviewers:** You asked at least two reviewers to review. If you aren't sure, start with GH suggestions. +- [ ] **Style Guide:** Your PR adheres to [the style guide](https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md) - In particular, mind the maximal line length of 100 (120 in exceptional circumstances). - There is no commented code checked in unless necessary. - - Any panickers have a proof or removed. -- [ ] You bumped the runtime version if there are breaking changes in the **runtime**. -- [ ] You updated any rustdocs which may have changed -- [ ] Has the PR altered the external API or interfaces used by Polkadot? Do you have the corresponding Polkadot PR ready? + - Any panickers in the runtime have a proof or were removed. +- [ ] **Runtime Version:** You bumped the runtime version if there are breaking changes in the **runtime**. +- [ ] **Docs:** You updated any rustdocs which may need to change. +- [ ] **Polkadot Companion:** Has the PR altered the external API or interfaces used by Polkadot? + - [ ] If so, do you have the corresponding Polkadot PR ready? + - [ ] Optionally: Do you have a corresponding Cumulus PR? Refer to [the contributing guide](https://github.com/paritytech/substrate/blob/master/docs/CONTRIBUTING.adoc) for details. diff --git a/docs/README.adoc b/docs/README.adoc index 71052420b1aa9..05f81442d4ca8 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -250,6 +250,20 @@ If you are trying to set up Substrate on Windows, you should do the following: 7. 
Finally, you need to install `cmake`: https://cmake.org/download/ +==== Docker + +You can use https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-linux[Parity CI docker image] with all necessary dependencies to build Substrate: + +[source, shell] +---- +#run it in the folder with the Substrate source code +docker run --rm -it -w /shellhere/substrate \ + -v $(pwd):/shellhere/substrate \ + paritytech/ci-linux:production +---- + +You can find necessary cargo commands in <> + ==== Shared Steps Then, grab the Substrate source code: diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 43eadffbe8497..d9de9ed3dedd4 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -21,8 +21,7 @@ use super::*; use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, - whitelisted_caller, + account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, }; use frame_support::{ dispatch::UnfilteredDispatchable, @@ -438,6 +437,6 @@ benchmarks_instance_pallet! { verify { assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); } -} -impl_benchmark_test_suite!(Assets, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Assets, crate::mock::new_test_ext(), crate::mock::Test) +} diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index 81b490eaf877c..ae31b8e395194 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -478,4 +478,88 @@ impl, I: 'static> Pallet { Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), credit)); Ok(credit) } + + /// Create a new asset without taking a deposit. + /// + /// * `id`: The `AssetId` you want the new asset to have. Must not already be in use. + /// * `owner`: The owner, issuer, admin, and freezer of this asset upon creation. 
+ /// * `is_sufficient`: Whether this asset needs users to have an existential deposit to hold + /// this asset. + /// * `min_balance`: The minimum balance a user is allowed to have of this asset before they are + /// considered dust and cleaned up. + pub(super) fn do_force_create( + id: T::AssetId, + owner: T::AccountId, + is_sufficient: bool, + min_balance: T::Balance, + ) -> DispatchResult { + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + min_balance, + is_sufficient, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, + }, + ); + Self::deposit_event(Event::ForceCreated(id, owner)); + Ok(()) + } + + /// Destroy an existing asset. + /// + /// * `id`: The asset you want to destroy. + /// * `witness`: Witness data needed about the current state of the asset, used to confirm + /// complexity of the operation. + /// * `maybe_check_owner`: An optional check before destroying the asset, if the provided + /// account is the owner of that asset. Can be used for authorization checks. 
+ pub(super) fn do_destroy( + id: T::AssetId, + witness: DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Asset::::try_mutate_exists(id, |maybe_details| { + let mut details = maybe_details.take().ok_or(Error::::Unknown)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(details.owner == check_owner, Error::::NoPermission); + } + ensure!(details.accounts <= witness.accounts, Error::::BadWitness); + ensure!(details.sufficients <= witness.sufficients, Error::::BadWitness); + ensure!(details.approvals <= witness.approvals, Error::::BadWitness); + + for (who, v) in Account::::drain_prefix(id) { + Self::dead_account(id, &who, &mut details, v.sufficient); + } + debug_assert_eq!(details.accounts, 0); + debug_assert_eq!(details.sufficients, 0); + + let metadata = Metadata::::take(&id); + T::Currency::unreserve( + &details.owner, + details.deposit.saturating_add(metadata.deposit), + ); + + for ((owner, _), approval) in Approvals::::drain_prefix((&id,)) { + T::Currency::unreserve(&owner, approval.deposit); + } + Self::deposit_event(Event::Destroyed(id)); + + Ok(DestroyWitness { + accounts: details.accounts, + sufficients: details.sufficients, + approvals: details.approvals, + }) + }) + } } diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index 4e85b20a1fbb1..2e16a0910a4f0 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -60,6 +60,25 @@ impl, I: 'static> fungibles::Inspect<::AccountId } } +impl, I: 'static> fungibles::InspectMetadata<::AccountId> + for Pallet +{ + /// Return the name of an asset. + fn name(asset: &Self::AssetId) -> Vec { + Metadata::::get(asset).name.to_vec() + } + + /// Return the symbol of an asset. + fn symbol(asset: &Self::AssetId) -> Vec { + Metadata::::get(asset).symbol.to_vec() + } + + /// Return the decimals of an asset. 
+ fn decimals(asset: &Self::AssetId) -> u8 { + Metadata::::get(asset).decimals + } +} + impl, I: 'static> fungibles::Mutate<::AccountId> for Pallet { fn mint_into( asset: Self::AssetId, @@ -147,3 +166,30 @@ impl, I: 'static> fungibles::Unbalanced for Pallet, I: 'static> fungibles::Create for Pallet { + fn create( + id: T::AssetId, + admin: T::AccountId, + is_sufficient: bool, + min_balance: Self::Balance, + ) -> DispatchResult { + Self::do_force_create(id, admin, is_sufficient, min_balance) + } +} + +impl, I: 'static> fungibles::Destroy for Pallet { + type DestroyWitness = DestroyWitness; + + fn get_destroy_witness(asset: &T::AssetId) -> Option { + Asset::::get(asset).map(|asset_details| asset_details.destroy_witness()) + } + + fn destroy( + id: T::AssetId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Self::do_destroy(id, witness, maybe_check_owner) + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 797a3ae7ee9fb..c6f24e10a89f0 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -143,6 +143,7 @@ use codec::HasCompact; use frame_support::{ dispatch::{DispatchError, DispatchResult}, ensure, + pallet_prelude::DispatchResultWithPostInfo, traits::{ tokens::{fungibles, DepositConsequence, WithdrawConsequence}, BalanceStatus::Reserved, @@ -158,6 +159,9 @@ use sp_runtime::{ }; use sp_std::{borrow::Borrow, convert::TryInto, prelude::*}; +#[cfg(feature = "std")] +use frame_support::traits::GenesisBuild; + pub use pallet::*; pub use weights::WeightInfo; @@ -179,10 +183,22 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// The units in which we record balances. - type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen; + type Balance: Member + + Parameter + + AtLeast32BitUnsigned + + Default + + Copy + + MaybeSerializeDeserialize + + MaxEncodedLen; /// Identifier for the class of asset. 
- type AssetId: Member + Parameter + Default + Copy + HasCompact + MaxEncodedLen; + type AssetId: Member + + Parameter + + Default + + Copy + + HasCompact + + MaybeSerializeDeserialize + + MaxEncodedLen; /// The currency mechanism. type Currency: ReservableCurrency; @@ -275,6 +291,89 @@ pub mod pallet { ConstU32<300_000>, >; + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + /// Genesis assets: id, owner, is_sufficient, min_balance + pub assets: Vec<(T::AssetId, T::AccountId, bool, T::Balance)>, + /// Genesis metadata: id, name, symbol, decimals + pub metadata: Vec<(T::AssetId, Vec, Vec, u8)>, + /// Genesis accounts: id, account_id, balance + pub accounts: Vec<(T::AssetId, T::AccountId, T::Balance)>, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { + assets: Default::default(), + metadata: Default::default(), + accounts: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + for (id, owner, is_sufficient, min_balance) in &self.assets { + assert!(!Asset::::contains_key(id), "Asset id already in use"); + assert!(!min_balance.is_zero(), "Min balance should not be zero"); + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + min_balance: *min_balance, + is_sufficient: *is_sufficient, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, + }, + ); + } + + for (id, name, symbol, decimals) in &self.metadata { + assert!(Asset::::contains_key(id), "Asset does not exist"); + + let bounded_name: BoundedVec = + name.clone().try_into().expect("asset name is too long"); + let bounded_symbol: BoundedVec = + symbol.clone().try_into().expect("asset symbol is too long"); + + let metadata = AssetMetadata { + deposit: Zero::zero(), + name: bounded_name, + symbol: 
bounded_symbol, + decimals: *decimals, + is_frozen: false, + }; + Metadata::::insert(id, metadata); + } + + for (id, account_id, amount) in &self.accounts { + let result = >::increase_balance( + *id, + account_id, + *amount, + |details| -> DispatchResult { + debug_assert!( + T::Balance::max_value() - details.supply >= *amount, + "checked in prep; qed" + ); + details.supply = details.supply.saturating_add(*amount); + Ok(()) + }, + ); + assert!(result.is_ok()); + } + } + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -437,29 +536,7 @@ pub mod pallet { ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; - - ensure!(!Asset::::contains_key(id), Error::::InUse); - ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); - - Asset::::insert( - id, - AssetDetails { - owner: owner.clone(), - issuer: owner.clone(), - admin: owner.clone(), - freezer: owner.clone(), - supply: Zero::zero(), - deposit: Zero::zero(), - min_balance, - is_sufficient, - accounts: 0, - sufficients: 0, - approvals: 0, - is_frozen: false, - }, - ); - Self::deposit_event(Event::ForceCreated(id, owner)); - Ok(()) + Self::do_force_create(id, owner, is_sufficient, min_balance) } /// Destroy a class of fungible assets. 
@@ -494,39 +571,13 @@ pub mod pallet { Ok(_) => None, Err(origin) => Some(ensure_signed(origin)?), }; - Asset::::try_mutate_exists(id, |maybe_details| { - let mut details = maybe_details.take().ok_or(Error::::Unknown)?; - if let Some(check_owner) = maybe_check_owner { - ensure!(details.owner == check_owner, Error::::NoPermission); - } - ensure!(details.accounts <= witness.accounts, Error::::BadWitness); - ensure!(details.sufficients <= witness.sufficients, Error::::BadWitness); - ensure!(details.approvals <= witness.approvals, Error::::BadWitness); - - for (who, v) in Account::::drain_prefix(id) { - Self::dead_account(id, &who, &mut details, v.sufficient); - } - debug_assert_eq!(details.accounts, 0); - debug_assert_eq!(details.sufficients, 0); - - let metadata = Metadata::::take(&id); - T::Currency::unreserve( - &details.owner, - details.deposit.saturating_add(metadata.deposit), - ); - - for ((owner, _), approval) in Approvals::::drain_prefix((&id,)) { - T::Currency::unreserve(&owner, approval.deposit); - } - Self::deposit_event(Event::Destroyed(id)); - - Ok(Some(T::WeightInfo::destroy( - details.accounts.saturating_sub(details.sufficients), - details.sufficients, - details.approvals, - )) - .into()) - }) + let details = Self::do_destroy(id, witness, maybe_check_owner)?; + Ok(Some(T::WeightInfo::destroy( + details.accounts.saturating_sub(details.sufficients), + details.sufficients, + details.approvals, + )) + .into()) } /// Mint assets of a particular class. 
diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 1b2602792d844..1e1ea8ba9a961 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -144,9 +144,26 @@ pub(crate) fn hooks() -> Vec { } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - let mut ext = sp_io::TestExternalities::new(t); + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let config: pallet_assets::GenesisConfig = pallet_assets::GenesisConfig { + assets: vec![ + // id, owner, is_sufficient, min_balance + (999, 0, true, 1), + ], + metadata: vec![ + // id, name, symbol, decimals + (999, "Token Name".into(), "TOKEN".into(), 10), + ], + accounts: vec![ + // id, account_id, balance + (999, 1, 100), + ], + }; + + config.assimilate_storage(&mut storage).unwrap(); + + let mut ext: sp_io::TestExternalities = storage.into(); ext.execute_with(|| System::set_block_number(1)); ext } diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index aab534a6e4efc..872bd7290133c 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -784,3 +784,13 @@ fn balance_conversion_should_work() { ); }); } + +#[test] +fn assets_from_genesis_should_exist() { + new_test_ext().execute_with(|| { + assert!(Asset::::contains_key(999)); + assert!(Metadata::::contains_key(999)); + assert_eq!(Assets::balance(999, 1), 100); + assert_eq!(Assets::total_supply(999), 100); + }); +} diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs index 372dfa532a894..7747c9bd1fc8c 100644 --- a/frame/babe/src/benchmarking.rs +++ b/frame/babe/src/benchmarking.rs @@ -63,6 +63,12 @@ benchmarks! { } verify { assert!(sp_consensus_babe::check_equivocation_proof::

(equivocation_proof2)); } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(3), + crate::mock::Test, + ) } #[cfg(test)] @@ -70,12 +76,6 @@ mod tests { use super::*; use crate::mock::*; - frame_benchmarking::impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(3), - crate::mock::Test, - ); - #[test] fn test_generate_equivocation_report_blob() { let (pairs, mut ext) = new_test_ext_with_pairs(3); diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index b39074bb3f057..4ccfdf6c13fe0 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -25,11 +25,13 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::DispatchResultWithPostInfo, traits::{ - DisabledValidators, FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, OneSessionHandler, + ConstU32, DisabledValidators, FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, + OneSessionHandler, }, weights::{Pays, Weight}, + BoundedVec, WeakBoundedVec, }; -use sp_application_crypto::Public; +use sp_application_crypto::{Public, TryFrom}; use sp_runtime::{ generic::DigestItem, traits::{IsMember, One, SaturatedConversion, Saturating, Zero}, @@ -100,7 +102,7 @@ impl EpochChangeTrigger for SameAuthoritiesForever { } } -const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; +const UNDER_CONSTRUCTION_SEGMENT_LENGTH: u32 = 256; type MaybeRandomness = Option; @@ -113,6 +115,7 @@ pub mod pallet { /// The BABE Pallet #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] pub struct Pallet(_); #[pallet::config] @@ -169,6 +172,10 @@ pub mod pallet { type HandleEquivocation: HandleEquivocation; type WeightInfo: WeightInfo; + + /// Max number of authorities allowed + #[pallet::constant] + type MaxAuthorities: Get; } #[pallet::error] @@ -189,7 +196,11 @@ pub mod pallet { /// Current epoch authorities. 
#[pallet::storage] #[pallet::getter(fn authorities)] - pub type Authorities = StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; + pub type Authorities = StorageValue< + _, + WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, + ValueQuery, + >; /// The slot at which the first epoch actually started. This is 0 /// until the first block of the chain. @@ -229,8 +240,11 @@ pub mod pallet { /// Next epoch authorities. #[pallet::storage] - pub(super) type NextAuthorities = - StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; + pub(super) type NextAuthorities = StorageValue< + _, + WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, + ValueQuery, + >; /// Randomness under construction. /// @@ -246,8 +260,13 @@ pub mod pallet { /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. #[pallet::storage] - pub(super) type UnderConstruction = - StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; + pub(super) type UnderConstruction = StorageMap< + _, + Twox64Concat, + u32, + BoundedVec>, + ValueQuery, + >; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. @@ -503,8 +522,8 @@ impl Pallet { /// Typically, this is not handled directly by the user, but by higher-level validator-set /// manager logic like `pallet-session`. pub fn enact_epoch_change( - authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - next_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + authorities: WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, + next_authorities: WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, ) { // PRECONDITION: caller has done initialization and is guaranteed // by the session module to be called before this. @@ -541,8 +560,10 @@ impl Pallet { // so that nodes can track changes. 
let next_randomness = NextRandomness::::get(); - let next_epoch = - NextEpochDescriptor { authorities: next_authorities, randomness: next_randomness }; + let next_epoch = NextEpochDescriptor { + authorities: next_authorities.to_vec(), + randomness: next_randomness, + }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); if let Some(next_config) = NextEpochConfig::::get() { @@ -571,7 +592,7 @@ impl Pallet { epoch_index: EpochIndex::::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), - authorities: Self::authorities(), + authorities: Self::authorities().to_vec(), randomness: Self::randomness(), config: EpochConfig::::get() .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), @@ -590,7 +611,7 @@ impl Pallet { epoch_index: next_epoch_index, start_slot: Self::epoch_start(next_epoch_index), duration: T::EpochDuration::get(), - authorities: NextAuthorities::::get(), + authorities: NextAuthorities::::get().to_vec(), randomness: NextRandomness::::get(), config: NextEpochConfig::::get().unwrap_or_else(|| { EpochConfig::::get().expect( @@ -619,14 +640,18 @@ impl Pallet { fn deposit_randomness(randomness: &schnorrkel::Randomness) { let segment_idx = SegmentIndex::::get(); let mut segment = UnderConstruction::::get(&segment_idx); - if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { + if segment.try_push(*randomness).is_ok() { // push onto current segment: not full. - segment.push(*randomness); UnderConstruction::::insert(&segment_idx, &segment); } else { // move onto the next segment and update the index. 
let segment_idx = segment_idx + 1; - UnderConstruction::::insert(&segment_idx, &vec![randomness.clone()]); + let bounded_randomness = + BoundedVec::<_, ConstU32>::try_from(vec![ + randomness.clone(), + ]) + .expect("UNDER_CONSTRUCTION_SEGMENT_LENGTH >= 1"); + UnderConstruction::::insert(&segment_idx, bounded_randomness); SegmentIndex::::put(&segment_idx); } } @@ -667,7 +692,7 @@ impl Pallet { // we use the same values as genesis because we haven't collected any // randomness yet. let next = NextEpochDescriptor { - authorities: Self::authorities(), + authorities: Self::authorities().to_vec(), randomness: Self::randomness(), }; @@ -732,7 +757,7 @@ impl Pallet { let segment_idx: u32 = SegmentIndex::::mutate(|s| sp_std::mem::replace(s, 0)); // overestimate to the segment being full. - let rho_size = segment_idx.saturating_add(1) as usize * UNDER_CONSTRUCTION_SEGMENT_LENGTH; + let rho_size = (segment_idx.saturating_add(1) * UNDER_CONSTRUCTION_SEGMENT_LENGTH) as usize; let next_randomness = compute_randomness( this_randomness, @@ -747,8 +772,11 @@ impl Pallet { fn initialize_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) { if !authorities.is_empty() { assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); - Authorities::::put(authorities); - NextAuthorities::::put(authorities); + let bounded_authorities = + WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&bounded_authorities); + NextAuthorities::::put(&bounded_authorities); } } @@ -878,10 +906,24 @@ impl OneSessionHandler for Pallet { I: Iterator, { let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + authorities, + Some( + "Warning: The session has more validators than expected. 
\ + A runtime configuration adjustment may be needed.", + ), + ); let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); + let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The session has more queued validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); - Self::enact_epoch_change(authorities, next_authorities) + Self::enact_epoch_change(bounded_authorities, next_bounded_authorities) } fn on_disabled(i: usize) { diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index bc0be32624cba..b504a26f60421 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -216,6 +216,7 @@ impl pallet_staking::Config for Test { type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); + type SortedListProvider = pallet_staking::UseNominatorsMap; } impl pallet_offences::Config for Test { @@ -229,6 +230,7 @@ parameter_types! 
{ pub const ExpectedBlockTime: u64 = 1; pub const ReportLongevity: u64 = BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * EpochDuration::get(); + pub const MaxAuthorities: u32 = 10; } impl Config for Test { @@ -251,6 +253,7 @@ impl Config for Test { super::EquivocationHandler; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } pub fn go_to_block(n: u64, s: u64) { diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index dc2f74c719519..34d861d5d97f7 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -92,7 +92,7 @@ fn first_block_epoch_zero_start() { let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( sp_consensus_babe::digests::NextEpochDescriptor { - authorities: Babe::authorities(), + authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), }, ); diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml new file mode 100644 index 0000000000000..860a6edc42143 --- /dev/null +++ b/frame/bags-list/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "pallet-bags-list" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet bags list" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# parity +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } + +# primitives +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } + +# FRAME +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = 
false, path = "../system" } +frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support" } + +# third party +log = { version = "0.4.14", default-features = false } + +# Optional imports for benchmarking +frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking", optional = true, default-features = false } +pallet-balances = { version = "4.0.0-dev", path = "../balances", optional = true, default-features = false } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", optional = true, default-features = false } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io", optional = true, default-features = false } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing", optional = true, default-features = false } + +[dev-dependencies] +sp-core = { version = "4.0.0-dev", path = "../../primitives/core"} +sp-io = { version = "4.0.0-dev", path = "../../primitives/io"} +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support", features = ["runtime-benchmarks"] } +frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } + +[features] +default = ["std"] +std = [ + "codec/std", + "sp-runtime/std", + "sp-std/std", + "frame-support/std", + "frame-system/std", + "frame-election-provider-support/std", + "log/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "sp-core", + "sp-io", + "pallet-balances", + "sp-tracing", + "frame-election-provider-support/runtime-benchmarks", +] + diff --git a/frame/bags-list/src/benchmarks.rs b/frame/bags-list/src/benchmarks.rs new file mode 100644 index 0000000000000..d86adc674c44a --- /dev/null +++ b/frame/bags-list/src/benchmarks.rs @@ -0,0 +1,143 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the bags list pallet. + +use super::*; +use crate::list::List; +use frame_benchmarking::{account, whitelisted_caller}; +use frame_election_provider_support::VoteWeightProvider; +use frame_support::{assert_ok, traits::Get}; +use frame_system::RawOrigin as SystemOrigin; + +frame_benchmarking::benchmarks! { + rebag_non_terminal { + // An expensive case for rebag-ing (rebag a non-terminal node): + // + // - The node to be rebagged, _R_, should exist as a non-terminal node in a bag with at + // least 2 other nodes. Thus _R_ will have both its `prev` and `next` nodes updated when + // it is removed. (3 W/R) + // - The destination bag is not empty, thus we need to update the `next` pointer of the last + // node in the destination in addition to the work we do otherwise. (2 W/R) + + // clear any pre-existing storage. + List::::clear(None); + + // define our origin and destination thresholds. + let origin_bag_thresh = T::BagThresholds::get()[0]; + let dest_bag_thresh = T::BagThresholds::get()[1]; + + // seed items in the origin bag. 
+ let origin_head: T::AccountId = account("origin_head", 0, 0); + assert_ok!(List::::insert(origin_head.clone(), origin_bag_thresh)); + + let origin_middle: T::AccountId = account("origin_middle", 0, 0); // the node we rebag (_R_) + assert_ok!(List::::insert(origin_middle.clone(), origin_bag_thresh)); + + let origin_tail: T::AccountId = account("origin_tail", 0, 0); + assert_ok!(List::::insert(origin_tail.clone(), origin_bag_thresh)); + + // seed items in the destination bag. + let dest_head: T::AccountId = account("dest_head", 0, 0); + assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); + + // the bags are in the expected state after initial setup. + assert_eq!( + List::::get_bags(), + vec![ + (origin_bag_thresh, vec![origin_head.clone(), origin_middle.clone(), origin_tail.clone()]), + (dest_bag_thresh, vec![dest_head.clone()]) + ] + ); + + let caller = whitelisted_caller(); + // update the weight of `origin_middle` to guarantee it will be rebagged into the destination. + T::VoteWeightProvider::set_vote_weight_of(&origin_middle, dest_bag_thresh); + }: rebag(SystemOrigin::Signed(caller), origin_middle.clone()) + verify { + // check the bags have updated as expected. + assert_eq!( + List::::get_bags(), + vec![ + ( + origin_bag_thresh, + vec![origin_head, origin_tail], + ), + ( + dest_bag_thresh, + vec![dest_head, origin_middle], + ) + ] + ); + } + + rebag_terminal { + // An expensive case for rebag-ing (rebag a terminal node): + // + // - The node to be rebagged, _R_, is a terminal node; so _R_, the node pointing to _R_ and + // the origin bag itself will need to be updated. (3 W/R) + // - The destination bag is not empty, thus we need to update the `next` pointer of the last + // node in the destination in addition to the work we do otherwise. (2 W/R) + + // clear any pre-existing storage. + List::::clear(None); + + // define our origin and destination thresholds. 
+ let origin_bag_thresh = T::BagThresholds::get()[0]; + let dest_bag_thresh = T::BagThresholds::get()[1]; + + // seed items in the origin bag. + let origin_head: T::AccountId = account("origin_head", 0, 0); + assert_ok!(List::::insert(origin_head.clone(), origin_bag_thresh)); + + let origin_tail: T::AccountId = account("origin_tail", 0, 0); // the node we rebag (_R_) + assert_ok!(List::::insert(origin_tail.clone(), origin_bag_thresh)); + + // seed items in the destination bag. + let dest_head: T::AccountId = account("dest_head", 0, 0); + assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); + + // the bags are in the expected state after initial setup. + assert_eq!( + List::::get_bags(), + vec![ + (origin_bag_thresh, vec![origin_head.clone(), origin_tail.clone()]), + (dest_bag_thresh, vec![dest_head.clone()]) + ] + ); + + let caller = whitelisted_caller(); + // update the weight of `origin_tail` to guarantee it will be rebagged into the destination. + T::VoteWeightProvider::set_vote_weight_of(&origin_tail, dest_bag_thresh); + }: rebag(SystemOrigin::Signed(caller), origin_tail.clone()) + verify { + // check the bags have updated as expected. + assert_eq!( + List::::get_bags(), + vec![ + (origin_bag_thresh, vec![origin_head.clone()]), + (dest_bag_thresh, vec![dest_head.clone(), origin_tail.clone()]) + ] + ); + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::ExtBuilder::default().build(), + crate::mock::Runtime, + ) +} diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs new file mode 100644 index 0000000000000..4202a4d499895 --- /dev/null +++ b/frame/bags-list/src/lib.rs @@ -0,0 +1,306 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Bags-List Pallet +//! +//! A semi-sorted list, where items hold an `AccountId` based on some `VoteWeight`. The `AccountId` +//! (`id` for short) might be synonym to a `voter` or `nominator` in some context, and `VoteWeight` +//! signifies the chance of each id being included in the final [`VoteWeightProvider::iter`]. +//! +//! It implements [`sp_election_provider_support::SortedListProvider`] to provide a semi-sorted list +//! of accounts to another pallet. It needs some other pallet to give it some information about the +//! weights of accounts via [`sp_election_provider_support::VoteWeightProvider`]. +//! +//! This pallet is not configurable at genesis. Whoever uses it should call appropriate functions of +//! the `SortedListProvider` (e.g. `on_insert`, or `regenerate`) at their genesis. +//! +//! # Goals +//! +//! The data structure exposed by this pallet aims to be optimized for: +//! +//! - insertions and removals. +//! - iteration over the top* N items by weight, where the precise ordering of items doesn't +//! particularly matter. +//! +//! # Details +//! +//! - items are kept in bags, which are delineated by their range of weight (See [`BagThresholds`]). +//! - for iteration, bags are chained together from highest to lowest and elements within the bag +//! are iterated from head to tail. +//! - items within a bag are iterated in order of insertion. Thus removing an item and re-inserting +//! it will worsen its position in list iteration; this reduces incentives for some types of spam +//! 
that involve consistently removing and inserting for better position. Further, ordering +//! granularity is thus dictated by range between each bag threshold. +//! - if an item's weight changes to a value no longer within the range of its current bag the +//! item's position will need to be updated by an external actor with rebag (update), or removal +//! and insertion. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_election_provider_support::{SortedListProvider, VoteWeight, VoteWeightProvider}; +use frame_system::ensure_signed; +use sp_std::prelude::*; + +#[cfg(any(feature = "runtime-benchmarks", test))] +mod benchmarks; + +mod list; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; + +pub use pallet::*; +pub use weights::WeightInfo; + +pub use list::Error; +use list::List; + +pub(crate) const LOG_TARGET: &'static str = "runtime::bags_list"; + +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: crate::LOG_TARGET, + concat!("[{:?}] 👜", $patter), >::block_number() $(, $values)* + ) + }; +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: weights::WeightInfo; + + /// Something that provides the weights of ids. + type VoteWeightProvider: VoteWeightProvider; + + /// The list of thresholds separating the various bags. + /// + /// Ids are separated into unsorted bags according to their vote weight. This specifies the + /// thresholds separating the bags. 
An id's bag is the largest bag for which the id's weight + /// is less than or equal to its upper threshold. + /// + /// When ids are iterated, higher bags are iterated completely before lower bags. This means + /// that iteration is _semi-sorted_: ids of higher weight tend to come before ids of lower + /// weight, but peer ids within a particular bag are sorted in insertion order. + /// + /// # Expressing the constant + /// + /// This constant must be sorted in strictly increasing order. Duplicate items are not + /// permitted. + /// + /// There is an implied upper limit of `VoteWeight::MAX`; that value does not need to be + /// specified within the bag. For any two threshold lists, if one ends with + /// `VoteWeight::MAX`, the other one does not, and they are otherwise equal, the two lists + /// will behave identically. + /// + /// # Calculation + /// + /// It is recommended to generate the set of thresholds in a geometric series, such that + /// there exists some constant ratio such that `threshold[k + 1] == (threshold[k] * + /// constant_ratio).max(threshold[k] + 1)` for all `k`. + /// + /// The helpers in the `/utils/frame/generate-bags` module can simplify this calculation. + /// + /// # Examples + /// + /// - If `BagThresholds::get().is_empty()`, then all ids are put into the same bag, and + /// iteration is strictly in insertion order. + /// - If `BagThresholds::get().len() == 64`, and the thresholds are determined according to + /// the procedure given above, then the constant ratio is equal to 2. + /// - If `BagThresholds::get().len() == 200`, and the thresholds are determined according to + /// the procedure given above, then the constant ratio is approximately equal to 1.248. + /// - If the threshold list begins `[1, 2, 3, ...]`, then an id with weight 0 or 1 will fall + /// into bag 0, an id with weight 2 will fall into bag 1, etc. 
+ /// + /// # Migration + /// + /// In the event that this list ever changes, a copy of the old bags list must be retained. + /// With that `List::migrate` can be called, which will perform the appropriate migration. + #[pallet::constant] + type BagThresholds: Get<&'static [VoteWeight]>; + } + + /// How many ids are registered. + // NOTE: This is merely a counter for `ListNodes`. It should someday be replaced by the + // `CountedMap` storage. + #[pallet::storage] + pub(crate) type CounterForListNodes = StorageValue<_, u32, ValueQuery>; + + /// A single node, within some bag. + /// + /// Nodes store links forward and back within their respective bags. + #[pallet::storage] + pub(crate) type ListNodes = StorageMap<_, Twox64Concat, T::AccountId, list::Node>; + + /// A bag stored in storage. + /// + /// Stores a `Bag` struct, which stores head and tail pointers to itself. + #[pallet::storage] + pub(crate) type ListBags = StorageMap<_, Twox64Concat, VoteWeight, list::Bag>; + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// Moved an account from one bag to another. \[who, from, to\]. + Rebagged(T::AccountId, VoteWeight, VoteWeight), + } + + #[pallet::call] + impl Pallet { + /// Declare that some `dislocated` account has, through rewards or penalties, sufficiently + /// changed its weight that it should properly fall into a different bag than its current + /// one. + /// + /// Anyone can call this function about any potentially dislocated account. + /// + /// Will never return an error; if `dislocated` does not exist or doesn't need a rebag, then + /// it is a noop and fees are still collected from `origin`. 
+ #[pallet::weight(T::WeightInfo::rebag_non_terminal().max(T::WeightInfo::rebag_terminal()))] + pub fn rebag(origin: OriginFor, dislocated: T::AccountId) -> DispatchResult { + ensure_signed(origin)?; + let current_weight = T::VoteWeightProvider::vote_weight(&dislocated); + let _ = Pallet::::do_rebag(&dislocated, current_weight); + Ok(()) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + // ensure they are strictly increasing, this also implies that duplicates are detected. + assert!( + T::BagThresholds::get().windows(2).all(|window| window[1] > window[0]), + "thresholds must strictly increase, and have no duplicates", + ); + } + } +} + +impl Pallet { + /// Move an account from one bag to another, depositing an event on success. + /// + /// If the account changed bags, returns `Some((from, to))`. + pub fn do_rebag( + account: &T::AccountId, + new_weight: VoteWeight, + ) -> Option<(VoteWeight, VoteWeight)> { + // if no voter at that node, don't do anything. + // the caller just wasted the fee to call this. + let maybe_movement = list::Node::::get(&account) + .and_then(|node| List::update_position_for(node, new_weight)); + if let Some((from, to)) = maybe_movement { + Self::deposit_event(Event::::Rebagged(account.clone(), from, to)); + }; + maybe_movement + } + + /// Equivalent to `ListBags::get`, but public. Useful for tests outside of this crate. 
+ #[cfg(feature = "std")] + pub fn list_bags_get(weight: VoteWeight) -> Option> { + ListBags::get(weight) + } +} + +impl SortedListProvider for Pallet { + type Error = Error; + + fn iter() -> Box> { + Box::new(List::::iter().map(|n| n.id().clone())) + } + + fn count() -> u32 { + CounterForListNodes::::get() + } + + fn contains(id: &T::AccountId) -> bool { + List::::contains(id) + } + + fn on_insert(id: T::AccountId, weight: VoteWeight) -> Result<(), Error> { + List::::insert(id, weight) + } + + fn on_update(id: &T::AccountId, new_weight: VoteWeight) { + Pallet::::do_rebag(id, new_weight); + } + + fn on_remove(id: &T::AccountId) { + List::::remove(id) + } + + fn regenerate( + all: impl IntoIterator, + weight_of: Box VoteWeight>, + ) -> u32 { + List::::regenerate(all, weight_of) + } + + #[cfg(feature = "std")] + fn sanity_check() -> Result<(), &'static str> { + List::::sanity_check() + } + + #[cfg(not(feature = "std"))] + fn sanity_check() -> Result<(), &'static str> { + Ok(()) + } + + fn clear(maybe_count: Option) -> u32 { + List::::clear(maybe_count) + } + + #[cfg(feature = "runtime-benchmarks")] + fn weight_update_worst_case(who: &T::AccountId, is_increase: bool) -> VoteWeight { + use frame_support::traits::Get as _; + let thresholds = T::BagThresholds::get(); + let node = list::Node::::get(who).unwrap(); + let current_bag_idx = thresholds + .iter() + .chain(sp_std::iter::once(&VoteWeight::MAX)) + .position(|w| w == &node.bag_upper()) + .unwrap(); + + if is_increase { + let next_threshold_idx = current_bag_idx + 1; + assert!(thresholds.len() > next_threshold_idx); + thresholds[next_threshold_idx] + } else { + assert!(current_bag_idx != 0); + let prev_threshold_idx = current_bag_idx - 1; + thresholds[prev_threshold_idx] + } + } +} diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs new file mode 100644 index 0000000000000..3f55f22271910 --- /dev/null +++ b/frame/bags-list/src/list/mod.rs @@ -0,0 +1,786 @@ +// This file is part of 
Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of a "bags list": a semi-sorted list where ordering granularity is dictated by +//! configurable thresholds that delineate the boundaries of bags. It uses a pattern of composite +//! data structures, where multiple storage items are masked by one outer API. See [`ListNodes`], +//! [`CounterForListNodes`] and [`ListBags`] for more information. +//! +//! The outer API of this module is the [`List`] struct. It wraps all acceptable operations on top +//! of the aggregate linked list. All operations with the bags list should happen through this +//! interface. + +use crate::Config; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_election_provider_support::{VoteWeight, VoteWeightProvider}; +use frame_support::{traits::Get, DefaultNoBound}; +use scale_info::TypeInfo; +use sp_std::{ + boxed::Box, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + iter, + marker::PhantomData, + vec::Vec, +}; + +#[derive(Debug, PartialEq, Eq)] +pub enum Error { + /// A duplicate id has been detected. + Duplicate, +} + +#[cfg(test)] +mod tests; + +/// Given a certain vote weight, to which bag does it belong to? +/// +/// Bags are identified by their upper threshold; the value returned by this function is guaranteed +/// to be a member of `T::BagThresholds`. 
+/// +/// Note that even if the thresholds list does not have `VoteWeight::MAX` as its final member, this +/// function behaves as if it does. +pub(crate) fn notional_bag_for(weight: VoteWeight) -> VoteWeight { + let thresholds = T::BagThresholds::get(); + let idx = thresholds.partition_point(|&threshold| weight > threshold); + thresholds.get(idx).copied().unwrap_or(VoteWeight::MAX) +} + +/// The **ONLY** entry point of this module. All operations to the bags-list should happen through +/// this interface. It is forbidden to access other module members directly. +// +// Data structure providing efficient mostly-accurate selection of the top N id by `VoteWeight`. +// +// It's implemented as a set of linked lists. Each linked list comprises a bag of ids of +// arbitrary and unbounded length, all having a vote weight within a particular constant range. +// This structure means that ids can be added and removed in `O(1)` time. +// +// Iteration is accomplished by chaining the iteration of each bag, from greatest to least. While +// the users within any particular bag are sorted in an entirely arbitrary order, the overall vote +// weight decreases as successive bags are reached. This means that it is valid to truncate +// iteration at any desired point; only those ids in the lowest bag can be excluded. This +// satisfies both the desire for fairness and the requirement for efficiency. +pub struct List(PhantomData); + +impl List { + /// Remove all data associated with the list from storage. Parameter `items` is the number of + /// items to clear from the list. WARNING: `None` will clear all items and should generally not + /// be used in production as it could lead to an infinite number of storage accesses. 
+ pub(crate) fn clear(maybe_count: Option) -> u32 { + crate::ListBags::::remove_all(maybe_count); + crate::ListNodes::::remove_all(maybe_count); + if let Some(count) = maybe_count { + crate::CounterForListNodes::::mutate(|items| *items - count); + count + } else { + crate::CounterForListNodes::::take() + } + } + + /// Regenerate all of the data from the given ids. + /// + /// WARNING: this is expensive and should only ever be performed when the list needs to be + /// generated from scratch. Care needs to be taken to ensure + /// + /// This may or may not need to be called at genesis as well, based on the configuration of the + /// pallet using this `List`. + /// + /// Returns the number of ids migrated. + pub fn regenerate( + all: impl IntoIterator, + weight_of: Box VoteWeight>, + ) -> u32 { + Self::clear(None); + Self::insert_many(all, weight_of) + } + + /// Migrate the list from one set of thresholds to another. + /// + /// This should only be called as part of an intentional migration; it's fairly expensive. + /// + /// Returns the number of accounts affected. + /// + /// Preconditions: + /// + /// - `old_thresholds` is the previous list of thresholds. + /// - All `bag_upper` currently in storage are members of `old_thresholds`. + /// - `T::BagThresholds` has already been updated and is the new set of thresholds. + /// + /// Postconditions: + /// + /// - All `bag_upper` currently in storage are members of `T::BagThresholds`. + /// - No id is changed unless required to by the difference between the old threshold list and + /// the new. + /// - ids whose bags change at all are implicitly rebagged into the appropriate bag in the new + /// threshold set. 
+ #[allow(dead_code)] + pub fn migrate(old_thresholds: &[VoteWeight]) -> u32 { + let new_thresholds = T::BagThresholds::get(); + if new_thresholds == old_thresholds { + return 0 + } + + // we can't check all preconditions, but we can check one + debug_assert!( + crate::ListBags::::iter().all(|(threshold, _)| old_thresholds.contains(&threshold)), + "not all `bag_upper` currently in storage are members of `old_thresholds`", + ); + debug_assert!( + crate::ListNodes::::iter().all(|(_, node)| old_thresholds.contains(&node.bag_upper)), + "not all `node.bag_upper` currently in storage are members of `old_thresholds`", + ); + + let old_set: BTreeSet<_> = old_thresholds.iter().copied().collect(); + let new_set: BTreeSet<_> = new_thresholds.iter().copied().collect(); + + // accounts that need to be rebagged + let mut affected_accounts = BTreeSet::new(); + // track affected old bags to make sure we only iterate them once + let mut affected_old_bags = BTreeSet::new(); + + let new_bags = new_set.difference(&old_set).copied(); + // a new bag means that all accounts previously using the old bag's threshold must now + // be rebagged + for inserted_bag in new_bags { + let affected_bag = { + // this recreates `notional_bag_for` logic, but with the old thresholds. + let idx = old_thresholds.partition_point(|&threshold| inserted_bag > threshold); + old_thresholds.get(idx).copied().unwrap_or(VoteWeight::MAX) + }; + if !affected_old_bags.insert(affected_bag) { + // If the previous threshold list was [10, 20], and we insert [3, 5], then there's + // no point iterating through bag 10 twice. 
+ continue + } + + if let Some(bag) = Bag::::get(affected_bag) { + affected_accounts.extend(bag.iter().map(|node| node.id)); + } + } + + let removed_bags = old_set.difference(&new_set).copied(); + // a removed bag means that all members of that bag must be rebagged + for removed_bag in removed_bags.clone() { + if !affected_old_bags.insert(removed_bag) { + continue + } + + if let Some(bag) = Bag::::get(removed_bag) { + affected_accounts.extend(bag.iter().map(|node| node.id)); + } + } + + // migrate the voters whose bag has changed + let num_affected = affected_accounts.len() as u32; + let weight_of = T::VoteWeightProvider::vote_weight; + let _removed = Self::remove_many(&affected_accounts); + debug_assert_eq!(_removed, num_affected); + let _inserted = Self::insert_many(affected_accounts.into_iter(), weight_of); + debug_assert_eq!(_inserted, num_affected); + + // we couldn't previously remove the old bags because both insertion and removal assume that + // it's always safe to add a bag if it's not present. Now that that's sorted, we can get rid + // of them. + // + // it's pretty cheap to iterate this again, because both sets are in-memory and require no + // lookups. + for removed_bag in removed_bags { + debug_assert!( + !crate::ListNodes::::iter().any(|(_, node)| node.bag_upper == removed_bag), + "no id should be present in a removed bag", + ); + crate::ListBags::::remove(removed_bag); + } + + debug_assert_eq!(Self::sanity_check(), Ok(())); + + num_affected + } + + /// Returns `true` if the list contains `id`, otherwise returns `false`. + pub(crate) fn contains(id: &T::AccountId) -> bool { + crate::ListNodes::::contains_key(id) + } + + /// Iterate over all nodes in all bags in the list. + /// + /// Full iteration can be expensive; it's recommended to limit the number of items with + /// `.take(n)`. 
+ pub(crate) fn iter() -> impl Iterator> { + // We need a touch of special handling here: because we permit `T::BagThresholds` to + // omit the final bound, we need to ensure that we explicitly include that threshold in the + // list. + // + // It's important to retain the ability to omit the final bound because it makes tests much + // easier; they can just configure `type BagThresholds = ()`. + let thresholds = T::BagThresholds::get(); + let iter = thresholds.iter().copied(); + let iter: Box> = if thresholds.last() == Some(&VoteWeight::MAX) { + // in the event that they included it, we can just pass the iterator through unchanged. + Box::new(iter.rev()) + } else { + // otherwise, insert it here. + Box::new(iter.chain(iter::once(VoteWeight::MAX)).rev()) + }; + + iter.filter_map(Bag::get).flat_map(|bag| bag.iter()) + } + + /// Insert several ids into the appropriate bags in the list. Continues with insertions + /// if duplicates are detected. + /// + /// Returns the final count of number of ids inserted. + fn insert_many( + ids: impl IntoIterator, + weight_of: impl Fn(&T::AccountId) -> VoteWeight, + ) -> u32 { + let mut count = 0; + ids.into_iter().for_each(|v| { + let weight = weight_of(&v); + if Self::insert(v, weight).is_ok() { + count += 1; + } + }); + + count + } + + /// Insert a new id into the appropriate bag in the list. + /// + /// Returns an error if the list already contains `id`. + pub(crate) fn insert(id: T::AccountId, weight: VoteWeight) -> Result<(), Error> { + if Self::contains(&id) { + return Err(Error::Duplicate) + } + + let bag_weight = notional_bag_for::(weight); + let mut bag = Bag::::get_or_make(bag_weight); + // unchecked insertion is okay; we just got the correct `notional_bag_for`. + bag.insert_unchecked(id.clone()); + + // new inserts are always the tail, so we must write the bag. 
+ bag.put(); + + crate::CounterForListNodes::::mutate(|prev_count| { + *prev_count = prev_count.saturating_add(1) + }); + + crate::log!( + debug, + "inserted {:?} with weight {} into bag {:?}, new count is {}", + id, + weight, + bag_weight, + crate::CounterForListNodes::::get(), + ); + + Ok(()) + } + + /// Remove an id from the list. + pub(crate) fn remove(id: &T::AccountId) { + Self::remove_many(sp_std::iter::once(id)); + } + + /// Remove many ids from the list. + /// + /// This is more efficient than repeated calls to `Self::remove`. + /// + /// Returns the final count of number of ids removed. + fn remove_many<'a>(ids: impl IntoIterator) -> u32 { + let mut bags = BTreeMap::new(); + let mut count = 0; + + for id in ids.into_iter() { + let node = match Node::::get(id) { + Some(node) => node, + None => continue, + }; + count += 1; + + if !node.is_terminal() { + // this node is not a head or a tail and thus the bag does not need to be updated + node.excise() + } else { + // this node is a head or tail, so the bag needs to be updated + let bag = bags + .entry(node.bag_upper) + .or_insert_with(|| Bag::::get_or_make(node.bag_upper)); + // node.bag_upper must be correct, therefore this bag will contain this node. + bag.remove_node_unchecked(&node); + } + + // now get rid of the node itself + node.remove_from_storage_unchecked() + } + + for (_, bag) in bags { + bag.put(); + } + + crate::CounterForListNodes::::mutate(|prev_count| { + *prev_count = prev_count.saturating_sub(count) + }); + + count + } + + /// Update a node's position in the list. + /// + /// If the node was in the correct bag, no effect. If the node was in the incorrect bag, they + /// are moved into the correct bag. + /// + /// Returns `Some((old_idx, new_idx))` if the node moved, otherwise `None`. + /// + /// This operation is somewhat more efficient than simply calling [`self.remove`] followed by + /// [`self.insert`]. 
However, given large quantities of nodes to move, it may be more efficient + /// to call [`self.remove_many`] followed by [`self.insert_many`]. + pub(crate) fn update_position_for( + node: Node, + new_weight: VoteWeight, + ) -> Option<(VoteWeight, VoteWeight)> { + node.is_misplaced(new_weight).then(move || { + let old_bag_upper = node.bag_upper; + + if !node.is_terminal() { + // this node is not a head or a tail, so we can just cut it out of the list. update + // and put the prev and next of this node, we do `node.put` inside `insert_note`. + node.excise(); + } else if let Some(mut bag) = Bag::::get(node.bag_upper) { + // this is a head or tail, so the bag must be updated. + bag.remove_node_unchecked(&node); + bag.put(); + } else { + crate::log!( + error, + "Node {:?} did not have a bag; ListBags is in an inconsistent state", + node.id, + ); + debug_assert!(false, "every node must have an extant bag associated with it"); + } + + // put the node into the appropriate new bag. + let new_bag_upper = notional_bag_for::(new_weight); + let mut bag = Bag::::get_or_make(new_bag_upper); + // prev, next, and bag_upper of the node are updated inside `insert_node`, also + // `node.put` is in there. + bag.insert_node_unchecked(node); + bag.put(); + + (old_bag_upper, new_bag_upper) + }) + } + + /// Sanity check the list. + /// + /// This should be called from the call-site, whenever one of the mutating apis (e.g. `insert`) + /// is being used, after all other staking data (such as counter) has been updated. It checks: + /// + /// * there are no duplicate ids, + /// * length of this list is in sync with `CounterForListNodes`, + /// * and sanity-checks all bags. This will cascade down all the checks and makes sure all bags + /// are checked per *any* update to `List`. 
+ #[cfg(feature = "std")] + pub(crate) fn sanity_check() -> Result<(), &'static str> { + use frame_support::ensure; + let mut seen_in_list = BTreeSet::new(); + ensure!( + Self::iter().map(|node| node.id).all(|id| seen_in_list.insert(id)), + "duplicate identified", + ); + + let iter_count = Self::iter().count() as u32; + let stored_count = crate::CounterForListNodes::::get(); + let nodes_count = crate::ListNodes::::iter().count() as u32; + ensure!(iter_count == stored_count, "iter_count != stored_count"); + ensure!(stored_count == nodes_count, "stored_count != nodes_count"); + + crate::log!(debug, "count of nodes: {}", stored_count); + + let active_bags = { + let thresholds = T::BagThresholds::get().iter().copied(); + let thresholds: Vec = if thresholds.clone().last() == Some(VoteWeight::MAX) { + // in the event that they included it, we don't need to make any changes + // Box::new(thresholds.collect() + thresholds.collect() + } else { + // otherwise, insert it here. + thresholds.chain(iter::once(VoteWeight::MAX)).collect() + }; + thresholds.into_iter().filter_map(|t| Bag::::get(t)) + }; + + let _ = active_bags.clone().map(|b| b.sanity_check()).collect::>()?; + + let nodes_in_bags_count = + active_bags.clone().fold(0u32, |acc, cur| acc + cur.iter().count() as u32); + ensure!(nodes_count == nodes_in_bags_count, "stored_count != nodes_in_bags_count"); + + crate::log!(debug, "count of active bags {}", active_bags.count()); + + // check that all nodes are sane. We check the `ListNodes` storage item directly in case we + // have some "stale" nodes that are not in a bag. + for (_id, node) in crate::ListNodes::::iter() { + node.sanity_check()? + } + + Ok(()) + } + + #[cfg(not(feature = "std"))] + pub(crate) fn sanity_check() -> Result<(), &'static str> { + Ok(()) + } + + /// Returns the nodes of all non-empty bags. For testing and benchmarks. 
+ #[cfg(any(feature = "std", feature = "runtime-benchmarks"))] + #[allow(dead_code)] + pub(crate) fn get_bags() -> Vec<(VoteWeight, Vec)> { + use frame_support::traits::Get as _; + + let thresholds = T::BagThresholds::get(); + let iter = thresholds.iter().copied(); + let iter: Box> = if thresholds.last() == Some(&VoteWeight::MAX) { + // in the event that they included it, we can just pass the iterator through unchanged. + Box::new(iter) + } else { + // otherwise, insert it here. + Box::new(iter.chain(sp_std::iter::once(VoteWeight::MAX))) + }; + + iter.filter_map(|t| { + Bag::::get(t).map(|bag| (t, bag.iter().map(|n| n.id().clone()).collect::>())) + }) + .collect::>() + } +} + +/// A Bag is a doubly-linked list of ids, where each id is mapped to a [`ListNode`]. +/// +/// Note that we maintain both head and tail pointers. While it would be possible to get away with +/// maintaining only a head pointer and cons-ing elements onto the front of the list, it's more +/// desirable to ensure that there is some element of first-come, first-serve to the list's +/// iteration so that there's no incentive to churn ids positioning to improve the chances of +/// appearing within the ids set. +#[derive(DefaultNoBound, Encode, Decode, MaxEncodedLen, TypeInfo)] +#[codec(mel_bound(T: Config))] +#[scale_info(skip_type_params(T))] +#[cfg_attr(feature = "std", derive(frame_support::DebugNoBound, Clone, PartialEq))] +pub struct Bag { + head: Option, + tail: Option, + + #[codec(skip)] + bag_upper: VoteWeight, +} + +impl Bag { + #[cfg(test)] + pub(crate) fn new( + head: Option, + tail: Option, + bag_upper: VoteWeight, + ) -> Self { + Self { head, tail, bag_upper } + } + + /// Get a bag by its upper vote weight. + pub(crate) fn get(bag_upper: VoteWeight) -> Option> { + crate::ListBags::::try_get(bag_upper).ok().map(|mut bag| { + bag.bag_upper = bag_upper; + bag + }) + } + + /// Get a bag by its upper vote weight or make it, appropriately initialized. 
Does not check if + /// if `bag_upper` is a valid threshold. + fn get_or_make(bag_upper: VoteWeight) -> Bag { + Self::get(bag_upper).unwrap_or(Bag { bag_upper, ..Default::default() }) + } + + /// `True` if self is empty. + fn is_empty(&self) -> bool { + self.head.is_none() && self.tail.is_none() + } + + /// Put the bag back into storage. + fn put(self) { + if self.is_empty() { + crate::ListBags::::remove(self.bag_upper); + } else { + crate::ListBags::::insert(self.bag_upper, self); + } + } + + /// Get the head node in this bag. + fn head(&self) -> Option> { + self.head.as_ref().and_then(|id| Node::get(id)) + } + + /// Get the tail node in this bag. + fn tail(&self) -> Option> { + self.tail.as_ref().and_then(|id| Node::get(id)) + } + + /// Iterate over the nodes in this bag. + pub(crate) fn iter(&self) -> impl Iterator> { + sp_std::iter::successors(self.head(), |prev| prev.next()) + } + + /// Insert a new id into this bag. + /// + /// This is private on purpose because it's naive: it doesn't check whether this is the + /// appropriate bag for this id at all. Generally, use [`List::insert`] instead. + /// + /// Storage note: this modifies storage, but only for the nodes. You still need to call + /// `self.put()` after use. + fn insert_unchecked(&mut self, id: T::AccountId) { + // insert_node will overwrite `prev`, `next` and `bag_upper` to the proper values. As long + // as this bag is the correct one, we're good. All calls to this must come after getting the + // correct [`notional_bag_for`]. + self.insert_node_unchecked(Node:: { id, prev: None, next: None, bag_upper: 0 }); + } + + /// Insert a node into this bag. + /// + /// This is private on purpose because it's naive; it doesn't check whether this is the + /// appropriate bag for this node at all. Generally, use [`List::insert`] instead. + /// + /// Storage note: this modifies storage, but only for the node. You still need to call + /// `self.put()` after use. 
+ fn insert_node_unchecked(&mut self, mut node: Node) { + if let Some(tail) = &self.tail { + if *tail == node.id { + // this should never happen, but this check prevents one path to a worst case + // infinite loop. + debug_assert!(false, "system logic error: inserting a node who has the id of tail"); + crate::log!(warn, "system logic error: inserting a node who has the id of tail"); + return + }; + } + + // re-set the `bag_upper`. Regardless of whatever the node had previously, now it is going + // to be `self.bag_upper`. + node.bag_upper = self.bag_upper; + + let id = node.id.clone(); + // update this node now, treating it as the new tail. + node.prev = self.tail.clone(); + node.next = None; + node.put(); + + // update the previous tail. + if let Some(mut old_tail) = self.tail() { + old_tail.next = Some(id.clone()); + old_tail.put(); + } + self.tail = Some(id.clone()); + + // ensure head exist. This is only set when the length of the bag is just 1, i.e. if this is + // the first insertion into the bag. In this case, both head and tail should point to the + // same node. + if self.head.is_none() { + self.head = Some(id.clone()); + debug_assert!(self.iter().count() == 1); + } + } + + /// Remove a node from this bag. + /// + /// This is private on purpose because it doesn't check whether this bag contains the node in + /// the first place. Generally, use [`List::remove`] instead, similar to `insert_unchecked`. + /// + /// Storage note: this modifies storage, but only for adjacent nodes. You still need to call + /// `self.put()` and `ListNodes::remove(id)` to update storage for the bag and `node`. + fn remove_node_unchecked(&mut self, node: &Node) { + // reassign neighboring nodes. + node.excise(); + + // clear the bag head/tail pointers as necessary. + if self.tail.as_ref() == Some(&node.id) { + self.tail = node.prev.clone(); + } + if self.head.as_ref() == Some(&node.id) { + self.head = node.next.clone(); + } + } + + /// Sanity check this bag. 
+ /// + /// Should be called by the call-site, after any mutating operation on a bag. The call site of + /// this struct is always `List`. + /// + /// * Ensures head has no prev. + /// * Ensures tail has no next. + /// * Ensures there are no loops, traversal from head to tail is correct. + #[cfg(feature = "std")] + fn sanity_check(&self) -> Result<(), &'static str> { + frame_support::ensure!( + self.head() + .map(|head| head.prev().is_none()) + // if there is no head, then there must not be a tail, meaning that the bag is + // empty. + .unwrap_or_else(|| self.tail.is_none()), + "head has a prev" + ); + + frame_support::ensure!( + self.tail() + .map(|tail| tail.next().is_none()) + // if there is no tail, then there must not be a head, meaning that the bag is + // empty. + .unwrap_or_else(|| self.head.is_none()), + "tail has a next" + ); + + let mut seen_in_bag = BTreeSet::new(); + frame_support::ensure!( + self.iter() + .map(|node| node.id) + // each voter is only seen once, thus there is no cycle within a bag + .all(|voter| seen_in_bag.insert(voter)), + "duplicate found in bag" + ); + + Ok(()) + } + + #[cfg(not(feature = "std"))] + fn sanity_check(&self) -> Result<(), &'static str> { + Ok(()) + } + + /// Iterate over the nodes in this bag (public for tests). + #[cfg(feature = "std")] + #[allow(dead_code)] + pub fn std_iter(&self) -> impl Iterator> { + sp_std::iter::successors(self.head(), |prev| prev.next()) + } + + /// Check if the bag contains a node with `id`. + #[cfg(feature = "std")] + fn contains(&self, id: &T::AccountId) -> bool { + self.iter().find(|n| n.id() == id).is_some() + } +} + +/// A Node is the fundamental element comprising the doubly-linked list described by `Bag`. 
+#[derive(Encode, Decode, MaxEncodedLen, TypeInfo)] +#[codec(mel_bound(T: Config))] +#[scale_info(skip_type_params(T))] +#[cfg_attr(feature = "std", derive(frame_support::DebugNoBound, Clone, PartialEq))] +pub struct Node { + id: T::AccountId, + prev: Option, + next: Option, + bag_upper: VoteWeight, +} + +impl Node { + /// Get a node by id. + pub(crate) fn get(id: &T::AccountId) -> Option> { + crate::ListNodes::::try_get(id).ok() + } + + /// Put the node back into storage. + fn put(self) { + crate::ListNodes::::insert(self.id.clone(), self); + } + + /// Update neighboring nodes to point to reach other. + /// + /// Only updates storage for adjacent nodes, but not `self`; so the user may need to call + /// `self.put`. + fn excise(&self) { + // Update previous node. + if let Some(mut prev) = self.prev() { + prev.next = self.next.clone(); + prev.put(); + } + // Update next self. + if let Some(mut next) = self.next() { + next.prev = self.prev.clone(); + next.put(); + } + } + + /// This is a naive function that removes a node from the `ListNodes` storage item. + /// + /// It is naive because it does not check if the node has first been removed from its bag. + fn remove_from_storage_unchecked(&self) { + crate::ListNodes::::remove(&self.id) + } + + /// Get the previous node in the bag. + fn prev(&self) -> Option> { + self.prev.as_ref().and_then(|id| Node::get(id)) + } + + /// Get the next node in the bag. + fn next(&self) -> Option> { + self.next.as_ref().and_then(|id| Node::get(id)) + } + + /// `true` when this voter is in the wrong bag. + pub(crate) fn is_misplaced(&self, current_weight: VoteWeight) -> bool { + notional_bag_for::(current_weight) != self.bag_upper + } + + /// `true` when this voter is a bag head or tail. + fn is_terminal(&self) -> bool { + self.prev.is_none() || self.next.is_none() + } + + /// Get the underlying voter. + pub(crate) fn id(&self) -> &T::AccountId { + &self.id + } + + /// Get the underlying voter (public fo tests). 
+ #[cfg(feature = "std")] + #[allow(dead_code)] + pub fn std_id(&self) -> &T::AccountId { + &self.id + } + + /// The bag this nodes belongs to (public for benchmarks). + #[cfg(feature = "runtime-benchmarks")] + #[allow(dead_code)] + pub fn bag_upper(&self) -> VoteWeight { + self.bag_upper + } + + #[cfg(feature = "std")] + fn sanity_check(&self) -> Result<(), &'static str> { + let expected_bag = Bag::::get(self.bag_upper).ok_or("bag not found for node")?; + + let id = self.id(); + + frame_support::ensure!( + expected_bag.contains(id), + "node does not exist in the expected bag" + ); + + frame_support::ensure!( + !self.is_terminal() || + expected_bag.head.as_ref() == Some(id) || + expected_bag.tail.as_ref() == Some(id), + "a terminal node is neither its bag head or tail" + ); + + Ok(()) + } +} diff --git a/frame/bags-list/src/list/tests.rs b/frame/bags-list/src/list/tests.rs new file mode 100644 index 0000000000000..14802bac9d1d8 --- /dev/null +++ b/frame/bags-list/src/list/tests.rs @@ -0,0 +1,740 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; +use crate::{ + mock::{test_utils::*, *}, + CounterForListNodes, ListBags, ListNodes, +}; +use frame_election_provider_support::SortedListProvider; +use frame_support::{assert_ok, assert_storage_noop}; + +#[test] +fn basic_setup_works() { + ExtBuilder::default().build_and_execute(|| { + // syntactic sugar to create a raw node + let node = |id, prev, next, bag_upper| Node:: { id, prev, next, bag_upper }; + + assert_eq!(CounterForListNodes::::get(), 4); + assert_eq!(ListNodes::::iter().count(), 4); + assert_eq!(ListBags::::iter().count(), 2); + + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // the state of the bags is as expected + assert_eq!( + ListBags::::get(10).unwrap(), + Bag:: { head: Some(1), tail: Some(1), bag_upper: 0 } + ); + assert_eq!( + ListBags::::get(1_000).unwrap(), + Bag:: { head: Some(2), tail: Some(4), bag_upper: 0 } + ); + + assert_eq!(ListNodes::::get(2).unwrap(), node(2, None, Some(3), 1_000)); + assert_eq!(ListNodes::::get(3).unwrap(), node(3, Some(2), Some(4), 1_000)); + assert_eq!(ListNodes::::get(4).unwrap(), node(4, Some(3), None, 1_000)); + assert_eq!(ListNodes::::get(1).unwrap(), node(1, None, None, 10)); + + // non-existent id does not have a storage footprint + assert_eq!(ListNodes::::get(42), None); + + // iteration of the bags would yield: + assert_eq!( + List::::iter().map(|n| *n.id()).collect::>(), + vec![2, 3, 4, 1], + // ^^ note the order of insertion in genesis! + ); + }); +} + +#[test] +fn notional_bag_for_works() { + // under a threshold gives the next threshold. + assert_eq!(notional_bag_for::(0), 10); + assert_eq!(notional_bag_for::(9), 10); + + // at a threshold gives that threshold. + assert_eq!(notional_bag_for::(10), 10); + + // above the threshold, gives the next threshold. 
+ assert_eq!(notional_bag_for::(11), 20); + + let max_explicit_threshold = *::BagThresholds::get().last().unwrap(); + assert_eq!(max_explicit_threshold, 10_000); + + // if the max explicit threshold is less than VoteWeight::MAX, + assert!(VoteWeight::MAX > max_explicit_threshold); + + // then anything above it will belong to the VoteWeight::MAX bag. + assert_eq!(notional_bag_for::(max_explicit_threshold), max_explicit_threshold); + assert_eq!(notional_bag_for::(max_explicit_threshold + 1), VoteWeight::MAX); +} + +#[test] +fn remove_last_node_in_bags_cleans_bag() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // bump 1 to a bigger bag + List::::remove(&1); + assert_ok!(List::::insert(1, 10_000)); + + // then the bag with bound 10 is wiped from storage. + assert_eq!(List::::get_bags(), vec![(1_000, vec![2, 3, 4]), (10_000, vec![1])]); + + // and can be recreated again as needed. + assert_ok!(List::::insert(77, 10)); + assert_eq!( + List::::get_bags(), + vec![(10, vec![77]), (1_000, vec![2, 3, 4]), (10_000, vec![1])] + ); + }); +} + +#[test] +fn migrate_works() { + ExtBuilder::default() + .add_ids(vec![(710, 15), (711, 16), (712, 2_000)]) + .build_and_execute(|| { + // given + assert_eq!( + List::::get_bags(), + vec![ + (10, vec![1]), + (20, vec![710, 711]), + (1_000, vec![2, 3, 4]), + (2_000, vec![712]) + ] + ); + let old_thresholds = ::BagThresholds::get(); + assert_eq!(old_thresholds, vec![10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]); + + // when the new thresholds adds `15` and removes `2_000` + const NEW_THRESHOLDS: &'static [VoteWeight] = + &[10, 15, 20, 30, 40, 50, 60, 1_000, 10_000]; + BagThresholds::set(NEW_THRESHOLDS); + // and we call + List::::migrate(old_thresholds); + + // then + assert_eq!( + List::::get_bags(), + vec![ + (10, vec![1]), + (15, vec![710]), // nodes in range 11 ..= 15 move from bag 20 to bag 15 + (20, vec![711]), + (1_000, vec![2, 3, 4]), + 
// nodes in range 1_001 ..= 2_000 move from bag 2_000 to bag 10_000 + (10_000, vec![712]), + ] + ); + }); +} + +mod list { + use super::*; + + #[test] + fn iteration_is_semi_sorted() { + ExtBuilder::default() + .add_ids(vec![(5, 2_000), (6, 2_000)]) + .build_and_execute(|| { + // given + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4]), (2_000, vec![5, 6])] + ); + assert_eq!( + get_list_as_ids(), + vec![ + 5, 6, // best bag + 2, 3, 4, // middle bag + 1, // last bag. + ] + ); + + // when adding an id that has a higher weight than pre-existing ids in the bag + assert_ok!(List::::insert(7, 10)); + + // then + assert_eq!( + get_list_as_ids(), + vec![ + 5, 6, // best bag + 2, 3, 4, // middle bag + 1, 7, // last bag; new id is last. + ] + ); + }) + } + + /// we can `take` x ids, even if that quantity ends midway through a list. + #[test] + fn take_works() { + ExtBuilder::default() + .add_ids(vec![(5, 2_000), (6, 2_000)]) + .build_and_execute(|| { + // given + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4]), (2_000, vec![5, 6])] + ); + + // when + let iteration = + List::::iter().map(|node| *node.id()).take(4).collect::>(); + + // then + assert_eq!( + iteration, + vec![ + 5, 6, // best bag, fully iterated + 2, 3, // middle bag, partially iterated + ] + ); + }) + } + + #[test] + fn insert_works() { + ExtBuilder::default().build_and_execute(|| { + // when inserting into an existing bag + assert_ok!(List::::insert(5, 1_000)); + + // then + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4, 5])]); + assert_eq!(get_list_as_ids(), vec![2, 3, 4, 5, 1]); + + // when inserting into a non-existent bag + assert_ok!(List::::insert(6, 1_001)); + + // then + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4, 5]), (2_000, vec![6])] + ); + assert_eq!(get_list_as_ids(), vec![6, 2, 3, 4, 5, 1]); + }); + } + + #[test] + fn insert_errors_with_duplicate_id() { + 
ExtBuilder::default().build_and_execute(|| { + // given + assert!(get_list_as_ids().contains(&3)); + + // then + assert_storage_noop!(assert_eq!( + List::::insert(3, 20).unwrap_err(), + Error::Duplicate + )); + }); + } + + #[test] + fn remove_works() { + use crate::{CounterForListNodes, ListBags, ListNodes}; + let ensure_left = |id, counter| { + assert!(!ListNodes::::contains_key(id)); + assert_eq!(CounterForListNodes::::get(), counter); + assert_eq!(ListNodes::::iter().count() as u32, counter); + }; + + ExtBuilder::default().build_and_execute(|| { + // removing a non-existent id is a noop + assert!(!ListNodes::::contains_key(42)); + assert_storage_noop!(List::::remove(&42)); + + // when removing a node from a bag with multiple nodes: + List::::remove(&2); + + // then + assert_eq!(get_list_as_ids(), vec![3, 4, 1]); + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 4])]); + ensure_left(2, 3); + + // when removing a node from a bag with only one node: + List::::remove(&1); + + // then + assert_eq!(get_list_as_ids(), vec![3, 4]); + assert_eq!(List::::get_bags(), vec![(1_000, vec![3, 4])]); + ensure_left(1, 2); + // bag 10 is removed + assert!(!ListBags::::contains_key(10)); + + // remove remaining ids to make sure storage cleans up as expected + List::::remove(&3); + ensure_left(3, 1); + assert_eq!(get_list_as_ids(), vec![4]); + + List::::remove(&4); + ensure_left(4, 0); + assert_eq!(get_list_as_ids(), Vec::::new()); + + // bags are deleted via removals + assert_eq!(ListBags::::iter().count(), 0); + }); + } + + #[test] + fn remove_many_is_noop_with_non_existent_ids() { + ExtBuilder::default().build_and_execute(|| { + let non_existent_ids = vec![&42, &666, &13]; + + // when account ids don' exist in the list + assert!(non_existent_ids.iter().all(|id| !BagsList::contains(id))); + + // then removing them is a noop + assert_storage_noop!(List::::remove_many(non_existent_ids)); + }); + } + + #[test] + fn update_position_for_works() { + 
ExtBuilder::default().build_and_execute(|| { + // given a correctly placed account 1 at bag 10. + let node = Node::::get(&1).unwrap(); + assert!(!node.is_misplaced(10)); + + // .. it is invalid with weight 20 + assert!(node.is_misplaced(20)); + + // move it to bag 20. + assert_eq!(List::::update_position_for(node, 20), Some((10, 20))); + + assert_eq!(List::::get_bags(), vec![(20, vec![1]), (1_000, vec![2, 3, 4])]); + + // get the new updated node; try and update the position with no change in weight. + let node = Node::::get(&1).unwrap(); + assert_storage_noop!(assert_eq!( + List::::update_position_for(node.clone(), 20), + None + )); + + // then move it to bag 1_000 by giving it weight 500. + assert_eq!(List::::update_position_for(node.clone(), 500), Some((20, 1_000))); + assert_eq!(List::::get_bags(), vec![(1_000, vec![2, 3, 4, 1])]); + + // moving within that bag again is a noop + let node = Node::::get(&1).unwrap(); + assert_storage_noop!(assert_eq!( + List::::update_position_for(node.clone(), 750), + None, + )); + assert_storage_noop!(assert_eq!( + List::::update_position_for(node, 1_000), + None, + )); + }); + } + + #[test] + fn sanity_check_works() { + ExtBuilder::default().build_and_execute_no_post_check(|| { + assert_ok!(List::::sanity_check()); + }); + + // make sure there are no duplicates. + ExtBuilder::default().build_and_execute_no_post_check(|| { + Bag::::get(10).unwrap().insert_unchecked(2); + assert_eq!(List::::sanity_check(), Err("duplicate identified")); + }); + + // ensure count is in sync with `CounterForListNodes`. 
+ ExtBuilder::default().build_and_execute_no_post_check(|| { + crate::CounterForListNodes::::mutate(|counter| *counter += 1); + assert_eq!(crate::CounterForListNodes::::get(), 5); + assert_eq!(List::::sanity_check(), Err("iter_count != stored_count")); + }); + } + + #[test] + fn contains_works() { + ExtBuilder::default().build_and_execute(|| { + assert!(GENESIS_IDS.iter().all(|(id, _)| List::::contains(id))); + + let non_existent_ids = vec![&42, &666, &13]; + assert!(non_existent_ids.iter().all(|id| !List::::contains(id))); + }) + } +} + +mod bags { + use super::*; + + #[test] + fn get_works() { + ExtBuilder::default().build_and_execute(|| { + let check_bag = |bag_upper, head, tail, ids| { + let bag = Bag::::get(bag_upper).unwrap(); + let bag_ids = bag.iter().map(|n| *n.id()).collect::>(); + + assert_eq!(bag, Bag:: { head, tail, bag_upper }); + assert_eq!(bag_ids, ids); + }; + + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // we can fetch them + check_bag(10, Some(1), Some(1), vec![1]); + check_bag(1_000, Some(2), Some(4), vec![2, 3, 4]); + + // and all other bag thresholds don't get bags. 
+ ::BagThresholds::get() + .iter() + .chain(iter::once(&VoteWeight::MAX)) + .filter(|bag_upper| !vec![10, 1_000].contains(bag_upper)) + .for_each(|bag_upper| { + assert_storage_noop!(assert_eq!(Bag::::get(*bag_upper), None)); + assert!(!ListBags::::contains_key(*bag_upper)); + }); + + // when we make a pre-existing bag empty + List::::remove(&1); + + // then + assert_eq!(Bag::::get(10), None) + }); + } + + #[test] + fn insert_node_sets_proper_bag() { + ExtBuilder::default().build_and_execute_no_post_check(|| { + let node = |id, bag_upper| Node:: { id, prev: None, next: None, bag_upper }; + + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + let mut bag_10 = Bag::::get(10).unwrap(); + bag_10.insert_node_unchecked(node(42, 5)); + + assert_eq!( + ListNodes::::get(&42).unwrap(), + Node { bag_upper: 10, prev: Some(1), next: None, id: 42 } + ); + }); + } + + #[test] + fn insert_node_happy_paths_works() { + ExtBuilder::default().build_and_execute_no_post_check(|| { + let node = |id, bag_upper| Node:: { id, prev: None, next: None, bag_upper }; + + // when inserting into a bag with 1 node + let mut bag_10 = Bag::::get(10).unwrap(); + bag_10.insert_node_unchecked(node(42, bag_10.bag_upper)); + // then + assert_eq!(bag_as_ids(&bag_10), vec![1, 42]); + + // when inserting into a bag with 3 nodes + let mut bag_1000 = Bag::::get(1_000).unwrap(); + bag_1000.insert_node_unchecked(node(52, bag_1000.bag_upper)); + // then + assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 4, 52]); + + // when inserting into a new bag + let mut bag_20 = Bag::::get_or_make(20); + bag_20.insert_node_unchecked(node(62, bag_20.bag_upper)); + // then + assert_eq!(bag_as_ids(&bag_20), vec![62]); + + // when inserting a node pointing to the accounts not in the bag + let node_61 = + Node:: { id: 61, prev: Some(21), next: Some(101), bag_upper: 20 }; + bag_20.insert_node_unchecked(node_61); + // then ids are in order + assert_eq!(bag_as_ids(&bag_20), vec![62, 61]); + // and when the 
node is re-fetched all the info is correct + assert_eq!( + Node::::get(&61).unwrap(), + Node:: { id: 61, prev: Some(62), next: None, bag_upper: 20 } + ); + + // state of all bags is as expected + bag_20.put(); // need to put this newly created bag so its in the storage map + assert_eq!( + List::::get_bags(), + vec![(10, vec![1, 42]), (20, vec![62, 61]), (1_000, vec![2, 3, 4, 52])] + ); + }); + } + + // Document improper ways `insert_node` may be getting used. + #[test] + fn insert_node_bad_paths_documented() { + let node = |id, prev, next, bag_upper| Node:: { id, prev, next, bag_upper }; + ExtBuilder::default().build_and_execute_no_post_check(|| { + // when inserting a node with both prev & next pointing at an account in an incorrect + // bag. + let mut bag_1000 = Bag::::get(1_000).unwrap(); + bag_1000.insert_node_unchecked(node(42, Some(1), Some(1), 500)); + + // then the proper prev and next is set. + assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 4, 42]); + + // and when the node is re-fetched all the info is correct + assert_eq!( + Node::::get(&42).unwrap(), + node(42, Some(4), None, bag_1000.bag_upper) + ); + }); + + ExtBuilder::default().build_and_execute_no_post_check(|| { + // given 3 is in bag_1000 (and not a tail node) + let mut bag_1000 = Bag::::get(1_000).unwrap(); + assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 4]); + + // when inserting a node with duplicate id 3 + bag_1000.insert_node_unchecked(node(3, None, None, bag_1000.bag_upper)); + + // then all the nodes after the duplicate are lost (because it is set as the tail) + assert_eq!(bag_as_ids(&bag_1000), vec![2, 3]); + // also in the full iteration, 2 and 3 are from bag_1000 and 1 is from bag_10. + assert_eq!(get_list_as_ids(), vec![2, 3, 1]); + + // and the last accessible node has an **incorrect** prev pointer. 
+ assert_eq!( + Node::::get(&3).unwrap(), + node(3, Some(4), None, bag_1000.bag_upper) + ); + }); + + ExtBuilder::default().build_and_execute_no_post_check(|| { + // when inserting a duplicate id of the head + let mut bag_1000 = Bag::::get(1_000).unwrap(); + assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 4]); + bag_1000.insert_node_unchecked(node(2, None, None, 0)); + + // then all nodes after the head are lost + assert_eq!(bag_as_ids(&bag_1000), vec![2]); + + // and the re-fetched node has bad pointers + assert_eq!( + Node::::get(&2).unwrap(), + node(2, Some(4), None, bag_1000.bag_upper) + ); + // ^^^ despite being the bags head, it has a prev + + assert_eq!(bag_1000, Bag { head: Some(2), tail: Some(2), bag_upper: 1_000 }) + }); + } + + // Panics in case of duplicate tail insert (which would result in an infinite loop). + #[test] + #[cfg_attr( + debug_assertions, + should_panic = "system logic error: inserting a node who has the id of tail" + )] + fn insert_node_duplicate_tail_panics_with_debug_assert() { + ExtBuilder::default().build_and_execute(|| { + let node = |id, prev, next, bag_upper| Node:: { id, prev, next, bag_upper }; + + // given + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])],); + let mut bag_1000 = Bag::::get(1_000).unwrap(); + + // when inserting a duplicate id that is already the tail + assert_eq!(bag_1000.tail, Some(4)); + assert_eq!(bag_1000.iter().count(), 3); + bag_1000.insert_node_unchecked(node(4, None, None, bag_1000.bag_upper)); // panics in debug + assert_eq!(bag_1000.iter().count(), 3); // in release we expect it to silently ignore the request. 
+ }); + } + + #[test] + fn remove_node_happy_paths_works() { + ExtBuilder::default() + .add_ids(vec![ + (11, 10), + (12, 10), + (13, 1_000), + (14, 1_000), + (15, 2_000), + (16, 2_000), + (17, 2_000), + (18, 2_000), + (19, 2_000), + ]) + .build_and_execute_no_post_check(|| { + let mut bag_10 = Bag::::get(10).unwrap(); + let mut bag_1000 = Bag::::get(1_000).unwrap(); + let mut bag_2000 = Bag::::get(2_000).unwrap(); + + // given + assert_eq!(bag_as_ids(&bag_10), vec![1, 11, 12]); + assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 4, 13, 14]); + assert_eq!(bag_as_ids(&bag_2000), vec![15, 16, 17, 18, 19]); + + // when removing a node that is not pointing at the head or tail + let node_4 = Node::::get(&4).unwrap(); + let node_4_pre_remove = node_4.clone(); + bag_1000.remove_node_unchecked(&node_4); + + // then + assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 13, 14]); + assert_ok!(bag_1000.sanity_check()); + // and the node isn't mutated when its removed + assert_eq!(node_4, node_4_pre_remove); + + // when removing a head that is not pointing at the tail + let node_2 = Node::::get(&2).unwrap(); + bag_1000.remove_node_unchecked(&node_2); + + // then + assert_eq!(bag_as_ids(&bag_1000), vec![3, 13, 14]); + assert_ok!(bag_1000.sanity_check()); + + // when removing a tail that is not pointing at the head + let node_14 = Node::::get(&14).unwrap(); + bag_1000.remove_node_unchecked(&node_14); + + // then + assert_eq!(bag_as_ids(&bag_1000), vec![3, 13]); + assert_ok!(bag_1000.sanity_check()); + + // when removing a tail that is pointing at the head + let node_13 = Node::::get(&13).unwrap(); + bag_1000.remove_node_unchecked(&node_13); + + // then + assert_eq!(bag_as_ids(&bag_1000), vec![3]); + assert_ok!(bag_1000.sanity_check()); + + // when removing a node that is both the head & tail + let node_3 = Node::::get(&3).unwrap(); + bag_1000.remove_node_unchecked(&node_3); + bag_1000.put(); // put into storage so `get` returns the updated bag + + // then + assert_eq!(Bag::::get(1_000), 
None); + + // when removing a node that is pointing at both the head & tail + let node_11 = Node::::get(&11).unwrap(); + bag_10.remove_node_unchecked(&node_11); + + // then + assert_eq!(bag_as_ids(&bag_10), vec![1, 12]); + assert_ok!(bag_10.sanity_check()); + + // when removing a head that is pointing at the tail + let node_1 = Node::::get(&1).unwrap(); + bag_10.remove_node_unchecked(&node_1); + + // then + assert_eq!(bag_as_ids(&bag_10), vec![12]); + assert_ok!(bag_10.sanity_check()); + // and since we updated the bag's head/tail, we need to write this storage so we + // can correctly `get` it again in later checks + bag_10.put(); + + // when removing a node that is pointing at the head but not the tail + let node_16 = Node::::get(&16).unwrap(); + bag_2000.remove_node_unchecked(&node_16); + + // then + assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 18, 19]); + assert_ok!(bag_2000.sanity_check()); + + // when removing a node that is pointing at tail, but not head + let node_18 = Node::::get(&18).unwrap(); + bag_2000.remove_node_unchecked(&node_18); + + // then + assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 19]); + assert_ok!(bag_2000.sanity_check()); + + // finally, when reading from storage, the state of all bags is as expected + assert_eq!( + List::::get_bags(), + vec![(10, vec![12]), (2_000, vec![15, 17, 19])] + ); + }); + } + + #[test] + fn remove_node_bad_paths_documented() { + ExtBuilder::default().build_and_execute_no_post_check(|| { + let bad_upper_node_2 = Node:: { + id: 2, + prev: None, + next: Some(3), + bag_upper: 10, // should be 1_000 + }; + let mut bag_1000 = Bag::::get(1_000).unwrap(); + + // when removing a node that is in the bag but has the wrong upper + bag_1000.remove_node_unchecked(&bad_upper_node_2); + bag_1000.put(); + + // then the node is no longer in any bags + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 4])]); + // .. 
and the bag it was removed from + let bag_1000 = Bag::::get(1_000).unwrap(); + // is sane + assert_ok!(bag_1000.sanity_check()); + // and has the correct head and tail. + assert_eq!(bag_1000.head, Some(3)); + assert_eq!(bag_1000.tail, Some(4)); + }); + + // Removing a node that is in another bag, will mess up that other bag. + ExtBuilder::default().build_and_execute_no_post_check(|| { + // given a tail node is in bag 1_000 + let node_4 = Node::::get(&4).unwrap(); + + // when we remove it from bag 10 + let mut bag_10 = Bag::::get(10).unwrap(); + bag_10.remove_node_unchecked(&node_4); + bag_10.put(); + + // then bag remove was called on is ok, + let bag_10 = Bag::::get(10).unwrap(); + assert_eq!(bag_10.tail, Some(1)); + assert_eq!(bag_10.head, Some(1)); + + // but the bag that the node belonged to is in an invalid state + let bag_1000 = Bag::::get(1_000).unwrap(); + // because it still has the removed node as its tail. + assert_eq!(bag_1000.tail, Some(4)); + assert_eq!(bag_1000.head, Some(2)); + }); + } +} + +mod node { + use super::*; + + #[test] + fn is_misplaced_works() { + ExtBuilder::default().build_and_execute(|| { + let node = Node::::get(&1).unwrap(); + + // given + assert_eq!(node.bag_upper, 10); + + // then within bag 10 its not misplaced, + assert!(!node.is_misplaced(0)); + assert!(!node.is_misplaced(9)); + assert!(!node.is_misplaced(10)); + + // and out of bag 10 it is misplaced + assert!(node.is_misplaced(11)); + }); + } +} diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs new file mode 100644 index 0000000000000..a6ab35896b1e7 --- /dev/null +++ b/frame/bags-list/src/mock.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Mock runtime for pallet-bags-lists tests. + +use super::*; +use crate::{self as bags_list}; +use frame_election_provider_support::VoteWeight; +use frame_support::parameter_types; + +pub type AccountId = u32; +pub type Balance = u32; + +parameter_types! { + pub static NextVoteWeight: VoteWeight = 0; +} + +pub struct StakingMock; +impl frame_election_provider_support::VoteWeightProvider for StakingMock { + fn vote_weight(id: &AccountId) -> VoteWeight { + match id { + 710 => 15, + 711 => 16, + 712 => 2_000, // special cases used for migrate test + _ => NextVoteWeight::get(), + } + } + #[cfg(any(feature = "runtime-benchmarks", test))] + fn set_vote_weight_of(_: &AccountId, weight: VoteWeight) { + // we don't really keep a mapping, just set weight for everyone. 
+ NextVoteWeight::set(weight) + } +} + +impl frame_system::Config for Runtime { + type SS58Prefix = (); + type BaseCallFilter = frame_support::traits::Everything; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = sp_core::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = sp_runtime::testing::Header; + type Event = Event; + type BlockHashCount = (); + type DbWeight = (); + type BlockLength = (); + type BlockWeights = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type OnSetCode = (); +} + +parameter_types! { + pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; +} + +impl bags_list::Config for Runtime { + type Event = Event; + type WeightInfo = (); + type BagThresholds = BagThresholds; + type VoteWeightProvider = StakingMock; +} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Storage, Event, Config}, + BagsList: bags_list::{Pallet, Call, Storage, Event}, + } +); + +/// Default AccountIds and their weights. +pub(crate) const GENESIS_IDS: [(AccountId, VoteWeight); 4] = + [(1, 10), (2, 1_000), (3, 1_000), (4, 1_000)]; + +#[derive(Default)] +pub(crate) struct ExtBuilder { + ids: Vec<(AccountId, VoteWeight)>, +} + +impl ExtBuilder { + /// Add some AccountIds to insert into `List`. 
+ pub(crate) fn add_ids(mut self, ids: Vec<(AccountId, VoteWeight)>) -> Self { + self.ids = ids; + self + } + + pub(crate) fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let mut ext = sp_io::TestExternalities::from(storage); + ext.execute_with(|| { + for (id, weight) in GENESIS_IDS.iter().chain(self.ids.iter()) { + frame_support::assert_ok!(List::::insert(*id, *weight)); + } + }); + + ext + } + + pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + List::::sanity_check().expect("Sanity check post condition failed") + }) + } + + pub(crate) fn build_and_execute_no_post_check(self, test: impl FnOnce() -> ()) { + self.build().execute_with(test) + } +} + +pub(crate) mod test_utils { + use super::*; + use list::Bag; + + /// Returns the ordered ids within the given bag. + pub(crate) fn bag_as_ids(bag: &Bag) -> Vec { + bag.iter().map(|n| *n.id()).collect::>() + } + + /// Returns the ordered ids from the list. + pub(crate) fn get_list_as_ids() -> Vec { + List::::iter().map(|n| *n.id()).collect::>() + } +} diff --git a/frame/bags-list/src/tests.rs b/frame/bags-list/src/tests.rs new file mode 100644 index 0000000000000..e94017730668b --- /dev/null +++ b/frame/bags-list/src/tests.rs @@ -0,0 +1,389 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{assert_ok, assert_storage_noop, traits::IntegrityTest}; + +use super::*; +use frame_election_provider_support::SortedListProvider; +use list::Bag; +use mock::{test_utils::*, *}; + +mod pallet { + use super::*; + + #[test] + fn rebag_works() { + ExtBuilder::default().add_ids(vec![(42, 20)]).build_and_execute(|| { + // given + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (20, vec![42]), (1_000, vec![2, 3, 4])] + ); + + // when increasing vote weight to the level of non-existent bag + NextVoteWeight::set(2_000); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + + // then a new bag is created and the id moves into it + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4]), (2_000, vec![42])] + ); + + // when decreasing weight within the range of the current bag + NextVoteWeight::set(1001); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + + // then the id does not move + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4]), (2_000, vec![42])] + ); + + // when reducing weight to the level of a non-existent bag + NextVoteWeight::set(30); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + + // then a new bag is created and the id moves into it + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (30, vec![42]), (1_000, vec![2, 3, 4])] + ); + + // when increasing weight to the level of a pre-existing bag + NextVoteWeight::set(500); + assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + + // then the id moves into that bag + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4, 42])] + ); + }); + } + + // Rebagging the tail of a bag results in the old bag having a new tail and an overall correct + // state. 
+ #[test] + fn rebag_tail_works() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // when + NextVoteWeight::set(10); + assert_ok!(BagsList::rebag(Origin::signed(0), 4)); + + // then + assert_eq!(List::::get_bags(), vec![(10, vec![1, 4]), (1_000, vec![2, 3])]); + assert_eq!(Bag::::get(1_000).unwrap(), Bag::new(Some(2), Some(3), 1_000)); + + // when + assert_ok!(BagsList::rebag(Origin::signed(0), 3)); + + // then + assert_eq!(List::::get_bags(), vec![(10, vec![1, 4, 3]), (1_000, vec![2])]); + + assert_eq!(Bag::::get(10).unwrap(), Bag::new(Some(1), Some(3), 10)); + assert_eq!(Bag::::get(1_000).unwrap(), Bag::new(Some(2), Some(2), 1_000)); + assert_eq!(get_list_as_ids(), vec![2u32, 1, 4, 3]); + + // when + assert_ok!(BagsList::rebag(Origin::signed(0), 2)); + + // then + assert_eq!(List::::get_bags(), vec![(10, vec![1, 4, 3, 2])]); + assert_eq!(Bag::::get(1_000), None); + }); + } + + // Rebagging the head of a bag results in the old bag having a new head and an overall correct + // state. 
+ #[test] + fn rebag_head_works() { + ExtBuilder::default().build_and_execute(|| { + // when + NextVoteWeight::set(10); + assert_ok!(BagsList::rebag(Origin::signed(0), 2)); + + // then + assert_eq!(List::::get_bags(), vec![(10, vec![1, 2]), (1_000, vec![3, 4])]); + assert_eq!(Bag::::get(1_000).unwrap(), Bag::new(Some(3), Some(4), 1_000)); + + // when + assert_ok!(BagsList::rebag(Origin::signed(0), 3)); + + // then + assert_eq!(List::::get_bags(), vec![(10, vec![1, 2, 3]), (1_000, vec![4])]); + assert_eq!(Bag::::get(1_000).unwrap(), Bag::new(Some(4), Some(4), 1_000)); + + // when + assert_ok!(BagsList::rebag(Origin::signed(0), 4)); + + // then + assert_eq!(List::::get_bags(), vec![(10, vec![1, 2, 3, 4])]); + assert_eq!(Bag::::get(1_000), None); + }); + } + + #[test] + fn wrong_rebag_is_noop() { + ExtBuilder::default().build_and_execute(|| { + let node_3 = list::Node::::get(&3).unwrap(); + // when account 3 is _not_ misplaced with weight 500 + NextVoteWeight::set(500); + assert!(!node_3.is_misplaced(500)); + + // then calling rebag on account 3 with weight 500 is a noop + assert_storage_noop!(assert_eq!(BagsList::rebag(Origin::signed(0), 3), Ok(()))); + + // when account 42 is not in the list + assert!(!BagsList::contains(&42)); + + // then rebag-ing account 42 is a noop + assert_storage_noop!(assert_eq!(BagsList::rebag(Origin::signed(0), 42), Ok(()))); + }); + } + + #[test] + #[should_panic = "thresholds must strictly increase, and have no duplicates"] + fn duplicate_in_bags_threshold_panics() { + const DUPE_THRESH: &[VoteWeight; 4] = &[10, 20, 30, 30]; + BagThresholds::set(DUPE_THRESH); + BagsList::integrity_test(); + } + + #[test] + #[should_panic = "thresholds must strictly increase, and have no duplicates"] + fn decreasing_in_bags_threshold_panics() { + const DECREASING_THRESH: &[VoteWeight; 4] = &[10, 30, 20, 40]; + BagThresholds::set(DECREASING_THRESH); + BagsList::integrity_test(); + } + + #[test] + fn empty_threshold_works() { + 
BagThresholds::set(Default::default()); // which is the same as passing `()` to `Get<_>`. + + ExtBuilder::default().build_and_execute(|| { + // everyone in the same bag. + assert_eq!(List::::get_bags(), vec![(VoteWeight::MAX, vec![1, 2, 3, 4])]); + + // any insertion goes there as well. + assert_ok!(List::::insert(5, 999)); + assert_ok!(List::::insert(6, 0)); + assert_eq!( + List::::get_bags(), + vec![(VoteWeight::MAX, vec![1, 2, 3, 4, 5, 6])] + ); + + // any rebag is noop. + assert_storage_noop!(assert!(BagsList::rebag(Origin::signed(0), 1).is_ok())); + assert_storage_noop!(assert!(BagsList::rebag(Origin::signed(0), 10).is_ok())); + }) + } +} + +mod sorted_list_provider { + use super::*; + + #[test] + fn iter_works() { + ExtBuilder::default().build_and_execute(|| { + let expected = vec![2, 3, 4, 1]; + for (i, id) in BagsList::iter().enumerate() { + assert_eq!(id, expected[i]) + } + }); + } + + #[test] + fn count_works() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(BagsList::count(), 4); + + // when inserting + assert_ok!(BagsList::on_insert(201, 0)); + // then the count goes up + assert_eq!(BagsList::count(), 5); + + // when removing + BagsList::on_remove(&201); + // then the count goes down + assert_eq!(BagsList::count(), 4); + + // when updating + BagsList::on_update(&201, VoteWeight::MAX); + // then the count stays the same + assert_eq!(BagsList::count(), 4); + }); + } + + #[test] + fn on_insert_works() { + ExtBuilder::default().build_and_execute(|| { + // when + assert_ok!(BagsList::on_insert(6, 1_000)); + + // then the bags + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4, 6])]); + // and list correctly include the new id, + assert_eq!(BagsList::iter().collect::>(), vec![2, 3, 4, 6, 1]); + // and the count is incremented. 
+ assert_eq!(BagsList::count(), 5); + + // when + assert_ok!(BagsList::on_insert(7, 1_001)); + + // then the bags + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4, 6]), (2_000, vec![7])] + ); + // and list correctly include the new id, + assert_eq!(BagsList::iter().collect::>(), vec![7, 2, 3, 4, 6, 1]); + // and the count is incremented. + assert_eq!(BagsList::count(), 6); + }) + } + + #[test] + fn on_insert_errors_with_duplicate_id() { + ExtBuilder::default().build_and_execute(|| { + // given + assert!(get_list_as_ids().contains(&3)); + + // then + assert_storage_noop!(assert_eq!( + BagsList::on_insert(3, 20).unwrap_err(), + Error::Duplicate + )); + }); + } + + #[test] + fn on_update_works() { + ExtBuilder::default().add_ids(vec![(42, 20)]).build_and_execute(|| { + // given + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (20, vec![42]), (1_000, vec![2, 3, 4])] + ); + assert_eq!(BagsList::count(), 5); + + // when increasing weight to the level of non-existent bag + BagsList::on_update(&42, 2_000); + + // then the bag is created with the id in it, + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4]), (2000, vec![42])] + ); + // and the id position is updated in the list. + assert_eq!(BagsList::iter().collect::>(), vec![42, 2, 3, 4, 1]); + + // when decreasing weight within the range of the current bag + BagsList::on_update(&42, 1_001); + + // then the id does not change bags, + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4]), (2000, vec![42])] + ); + // or change position in the list. 
+ assert_eq!(BagsList::iter().collect::>(), vec![42, 2, 3, 4, 1]); + + // when increasing weight to the level of a non-existent bag with the max threshold + BagsList::on_update(&42, VoteWeight::MAX); + + // the the new bag is created with the id in it, + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4]), (VoteWeight::MAX, vec![42])] + ); + // and the id position is updated in the list. + assert_eq!(BagsList::iter().collect::>(), vec![42, 2, 3, 4, 1]); + + // when decreasing the weight to a pre-existing bag + BagsList::on_update(&42, 1_000); + + // then id is moved to the correct bag (as the last member), + assert_eq!( + List::::get_bags(), + vec![(10, vec![1]), (1_000, vec![2, 3, 4, 42])] + ); + // and the id position is updated in the list. + assert_eq!(BagsList::iter().collect::>(), vec![2, 3, 4, 42, 1]); + + // since we have only called on_update, the `count` has not changed. + assert_eq!(BagsList::count(), 5); + }); + } + + #[test] + fn on_remove_works() { + let ensure_left = |id, counter| { + assert!(!ListNodes::::contains_key(id)); + assert_eq!(BagsList::count(), counter); + assert_eq!(CounterForListNodes::::get(), counter); + assert_eq!(ListNodes::::iter().count() as u32, counter); + }; + + ExtBuilder::default().build_and_execute(|| { + // it is a noop removing a non-existent id + assert!(!ListNodes::::contains_key(42)); + assert_storage_noop!(BagsList::on_remove(&42)); + + // when removing a node from a bag with multiple nodes + BagsList::on_remove(&2); + + // then + assert_eq!(get_list_as_ids(), vec![3, 4, 1]); + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 4])]); + ensure_left(2, 3); + + // when removing a node from a bag with only one node + BagsList::on_remove(&1); + + // then + assert_eq!(get_list_as_ids(), vec![3, 4]); + assert_eq!(List::::get_bags(), vec![(1_000, vec![3, 4])]); + ensure_left(1, 2); + + // when removing all remaining ids + BagsList::on_remove(&4); + assert_eq!(get_list_as_ids(), 
vec![3]); + ensure_left(4, 1); + BagsList::on_remove(&3); + + // then the storage is completely cleaned up + assert_eq!(get_list_as_ids(), Vec::::new()); + ensure_left(3, 0); + }); + } + + #[test] + fn contains_works() { + ExtBuilder::default().build_and_execute(|| { + assert!(GENESIS_IDS.iter().all(|(id, _)| BagsList::contains(id))); + + let non_existent_ids = vec![&42, &666, &13]; + assert!(non_existent_ids.iter().all(|id| !BagsList::contains(id))); + }) + } +} diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs new file mode 100644 index 0000000000000..95d3dfa6eb989 --- /dev/null +++ b/frame/bags-list/src/weights.rs @@ -0,0 +1,95 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_bags_list +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-09-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bags_list +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/bags-list/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_bags_list. +pub trait WeightInfo { + fn rebag_non_terminal() -> Weight; + fn rebag_terminal() -> Weight; +} + +/// Weights for pallet_bags_list using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: BagsList ListNodes (r:4 w:4) + // Storage: BagsList ListBags (r:1 w:1) + fn rebag_non_terminal() -> Weight { + (74_175_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) + fn rebag_terminal() -> Weight { + (73_305_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: BagsList ListNodes (r:4 w:4) + // Storage: BagsList ListBags (r:1 w:1) + fn rebag_non_terminal() -> Weight { + (74_175_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) + fn rebag_terminal() -> Weight { + (73_305_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } +} diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 97c3c4309a80d..1c48820094187 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -21,9 +21,7 @@ use super::*; -use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelisted_caller, -}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -195,10 +193,30 @@ benchmarks_instance_pallet! { assert!(Balances::::free_balance(&caller).is_zero()); assert_eq!(Balances::::free_balance(&recipient), balance); } -} -impl_benchmark_test_suite!( - Balances, - crate::tests_composite::ExtBuilder::default().build(), - crate::tests_composite::Test, -); + force_unreserve { + let user: T::AccountId = account("user", 0, SEED); + let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + + // Give some multiple of the existential deposit + let existential_deposit = T::ExistentialDeposit::get(); + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + let _ = as Currency<_>>::make_free_balance_be(&user, balance); + + // Reserve the balance + as ReservableCurrency<_>>::reserve(&user, balance)?; + assert_eq!(Balances::::reserved_balance(&user), balance); + assert!(Balances::::free_balance(&user).is_zero()); + + }: _(RawOrigin::Root, user_lookup, balance) + verify { + assert!(Balances::::reserved_balance(&user).is_zero()); + assert_eq!(Balances::::free_balance(&user), balance); + } + + 
impl_benchmark_test_suite!( + Balances, + crate::tests_composite::ExtBuilder::default().build(), + crate::tests_composite::Test, + ) +} diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index f7102ad4895f9..afd2331c8e3cf 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -167,6 +167,7 @@ use codec::{Codec, Decode, Encode, MaxEncodedLen}; use frame_support::traits::GenesisBuild; use frame_support::{ ensure, + pallet_prelude::DispatchResult, traits::{ tokens::{fungible, BalanceStatus as Status, DepositConsequence, WithdrawConsequence}, Currency, ExistenceRequirement, @@ -183,7 +184,7 @@ use sp_runtime::{ AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Saturating, StaticLookup, Zero, }, - ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, + ArithmeticError, DispatchError, RuntimeDebug, }; use sp_std::{cmp, fmt::Debug, mem, ops::BitOr, prelude::*, result}; pub use weights::WeightInfo; @@ -419,7 +420,7 @@ pub mod pallet { origin: OriginFor, dest: ::Source, keep_alive: bool, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { use fungible::Inspect; let transactor = ensure_signed(origin)?; let reducible_balance = Self::reducible_balance(&transactor, keep_alive); @@ -431,7 +432,22 @@ pub mod pallet { reducible_balance, keep_alive.into(), )?; - Ok(().into()) + Ok(()) + } + + /// Unreserve some balance from a user by force. + /// + /// Can only be called by ROOT. + #[pallet::weight(T::WeightInfo::force_unreserve())] + pub fn force_unreserve( + origin: OriginFor, + who: ::Source, + amount: T::Balance, + ) -> DispatchResult { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let _leftover = >::unreserve(&who, amount); + Ok(()) } } diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 9fce8d4fde266..6f333bfc0500f 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -18,7 +18,7 @@ //! 
Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-09-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -51,6 +51,7 @@ pub trait WeightInfo { fn set_balance_killing() -> Weight; fn force_transfer() -> Weight; fn transfer_all() -> Weight; + fn force_unreserve() -> Weight; } /// Weights for pallet_balances using the Substrate node and recommended hardware. @@ -58,37 +59,43 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (72_229_000 as Weight) + (70_952_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (55_013_000 as Weight) + (54_410_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (29_404_000 as Weight) + (29_176_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (36_311_000 as Weight) + (35_214_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (73_125_000 as Weight) + (71_780_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - 
(67_749_000 as Weight) + (66_475_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn force_unreserve() -> Weight { + (27_766_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -98,37 +105,43 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (72_229_000 as Weight) + (70_952_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (55_013_000 as Weight) + (54_410_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (29_404_000 as Weight) + (29_176_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (36_311_000 as Weight) + (35_214_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (73_125_000 as Weight) + (71_780_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - (67_749_000 as Weight) + (66_475_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn force_unreserve() -> Weight { + (27_766_000 as Weight) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml new file mode 100644 index 0000000000000..3d4a9a72ddf86 --- /dev/null +++ b/frame/beefy-mmr/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "pallet-beefy-mmr" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +description = "BEEFY + MMR runtime utilities" + +[dependencies] +hex = { version = "0.4", optional = true } +codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] } +libsecp256k1 = { version = "0.7.0", default-features = false } +log = { version = "0.4.13", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.130", optional = true } + +frame-support = { version = "4.0.0-dev", path = "../support", default-features = false } +frame-system = { version = "4.0.0-dev", path = "../system", default-features = false } +pallet-mmr = { version = "4.0.0-dev", path = "../merkle-mountain-range", default-features = false } +pallet-mmr-primitives = { version = "4.0.0-dev", path = "../merkle-mountain-range/primitives", default-features = false } +pallet-session = { version = "4.0.0-dev", path = "../session", default-features = false } + +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../../primitives/std", default-features = false } + +beefy-merkle-tree = { version = "4.0.0-dev", path = "./primitives", default-features = false } +beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy", default-features = false } 
+pallet-beefy = { version = "4.0.0-dev", path = "../beefy", default-features = false } + +[dev-dependencies] +sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } +hex-literal = "0.3" + +[features] +default = ["std"] +std = [ + "beefy-merkle-tree/std", + "beefy-primitives/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "hex", + "libsecp256k1/std", + "log/std", + "pallet-beefy/std", + "pallet-mmr-primitives/std", + "pallet-mmr/std", + "pallet-session/std", + "serde", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml new file mode 100644 index 0000000000000..d5dcc0eed3350 --- /dev/null +++ b/frame/beefy-mmr/primitives/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "beefy-merkle-tree" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +description = "A no-std/Substrate compatible library to construct binary merkle tree." + +[dependencies] +hex = { version = "0.4", optional = true, default-features = false } +log = { version = "0.4", optional = true, default-features = false } +tiny-keccak = { version = "2.0.2", features = ["keccak"], optional = true } + +[dev-dependencies] +env_logger = "0.9" +hex = "0.4" +hex-literal = "0.3" + +[features] +debug = ["hex", "log"] +default = ["std", "debug", "keccak"] +keccak = ["tiny-keccak"] +std = [] diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs new file mode 100644 index 0000000000000..4d4d4e8721ac8 --- /dev/null +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -0,0 +1,806 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +//! This crate implements a simple binary Merkle Tree utilities required for inter-op with Ethereum +//! bridge & Solidity contract. +//! +//! The implementation is optimised for usage within Substrate Runtime and supports no-std +//! compilation targets. +//! +//! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the +//! same [Hasher] as the inner nodes. +//! Inner nodes are created by concatenating child hashes and hashing again. The implementation +//! does not perform any sorting of the input data (leaves) nor when inner nodes are created. +//! +//! If the number of leaves is not even, last leave (hash of) is promoted to the upper layer. + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + +/// Supported hashing output size. +/// +/// The size is restricted to 32 bytes to allow for a more optimised implementation. +pub type Hash = [u8; 32]; + +/// Generic hasher trait. +/// +/// Implement the function to support custom way of hashing data. +/// The implementation must return a [Hash] type, so only 32-byte output hashes are supported. +pub trait Hasher { + /// Hash given arbitrary-length piece of data. + fn hash(data: &[u8]) -> Hash; +} + +#[cfg(feature = "keccak")] +mod keccak256 { + use tiny_keccak::{Hasher as _, Keccak}; + + /// Keccak256 hasher implementation. + pub struct Keccak256; + impl Keccak256 { + /// Hash given data. 
+ pub fn hash(data: &[u8]) -> super::Hash { + ::hash(data) + } + } + impl super::Hasher for Keccak256 { + fn hash(data: &[u8]) -> super::Hash { + let mut keccak = Keccak::v256(); + keccak.update(data); + let mut output = [0_u8; 32]; + keccak.finalize(&mut output); + output + } + } +} +#[cfg(feature = "keccak")] +pub use keccak256::Keccak256; + +/// Construct a root hash of a Binary Merkle Tree created from given leaves. +/// +/// See crate-level docs for details about Merkle Tree construction. +/// +/// In case an empty list of leaves is passed the function returns a 0-filled hash. +pub fn merkle_root(leaves: I) -> Hash +where + H: Hasher, + I: IntoIterator, + T: AsRef<[u8]>, +{ + let iter = leaves.into_iter().map(|l| H::hash(l.as_ref())); + merkelize::(iter, &mut ()) +} + +fn merkelize(leaves: I, visitor: &mut V) -> Hash +where + H: Hasher, + V: Visitor, + I: Iterator, +{ + let upper = Vec::with_capacity(leaves.size_hint().0); + let mut next = match merkelize_row::(leaves, upper, visitor) { + Ok(root) => return root, + Err(next) if next.is_empty() => return Hash::default(), + Err(next) => next, + }; + + let mut upper = Vec::with_capacity((next.len() + 1) / 2); + loop { + visitor.move_up(); + + match merkelize_row::(next.drain(..), upper, visitor) { + Ok(root) => return root, + Err(t) => { + // swap collections to avoid allocations + upper = next; + next = t; + }, + }; + } +} + +/// A generated merkle proof. +/// +/// The structure contains all necessary data to later on verify the proof and the leaf itself. +#[derive(Debug, PartialEq, Eq)] +pub struct MerkleProof { + /// Root hash of generated merkle tree. + pub root: Hash, + /// Proof items (does not contain the leaf hash, nor the root obviously). + /// + /// This vec contains all inner node hashes necessary to reconstruct the root hash given the + /// leaf hash. + pub proof: Vec, + /// Number of leaves in the original tree. 
+ /// + /// This is needed to detect a case where we have an odd number of leaves that "get promoted" + /// to upper layers. + pub number_of_leaves: usize, + /// Index of the leaf the proof is for (0-based). + pub leaf_index: usize, + /// Leaf content. + pub leaf: T, +} + +/// A trait of object inspecting merkle root creation. +/// +/// It can be passed to [`merkelize_row`] or [`merkelize`] functions and will be notified +/// about tree traversal. +trait Visitor { + /// We are moving one level up in the tree. + fn move_up(&mut self); + + /// We are creating an inner node from given `left` and `right` nodes. + /// + /// Note that in case of last odd node in the row `right` might be empty. + /// The method will also visit the `root` hash (level 0). + /// + /// The `index` is an index of `left` item. + fn visit(&mut self, index: usize, left: &Option, right: &Option); +} + +/// No-op implementation of the visitor. +impl Visitor for () { + fn move_up(&mut self) {} + fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} +} + +/// Construct a Merkle Proof for leaves given by indices. +/// +/// The function constructs a (partial) Merkle Tree first and stores all elements required +/// to prove requested item (leaf) given the root hash. +/// +/// Both the Proof and the Root Hash is returned. +/// +/// # Panic +/// +/// The function will panic if given [`leaf_index`] is greater than the number of leaves. +pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof +where + H: Hasher, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + T: AsRef<[u8]>, +{ + let mut leaf = None; + let iter = leaves.into_iter().enumerate().map(|(idx, l)| { + let hash = H::hash(l.as_ref()); + if idx == leaf_index { + leaf = Some(l); + } + hash + }); + + /// The struct collects a proof for single leaf. 
+ struct ProofCollection { + proof: Vec, + position: usize, + } + + impl ProofCollection { + fn new(position: usize) -> Self { + ProofCollection { proof: Default::default(), position } + } + } + + impl Visitor for ProofCollection { + fn move_up(&mut self) { + self.position /= 2; + } + + fn visit(&mut self, index: usize, left: &Option, right: &Option) { + // we are at left branch - right goes to the proof. + if self.position == index { + if let Some(right) = right { + self.proof.push(*right); + } + } + // we are at right branch - left goes to the proof. + if self.position == index + 1 { + if let Some(left) = left { + self.proof.push(*left); + } + } + } + } + + let number_of_leaves = iter.len(); + let mut collect_proof = ProofCollection::new(leaf_index); + + let root = merkelize::(iter, &mut collect_proof); + let leaf = leaf.expect("Requested `leaf_index` is greater than number of leaves."); + + #[cfg(feature = "debug")] + log::debug!( + "[merkle_proof] Proof: {:?}", + collect_proof.proof.iter().map(hex::encode).collect::>() + ); + + MerkleProof { root, proof: collect_proof.proof, number_of_leaves, leaf_index, leaf } +} + +/// Leaf node for proof verification. +/// +/// Can be either a value that needs to be hashed first, +/// or the hash itself. +#[derive(Debug, PartialEq, Eq)] +pub enum Leaf<'a> { + /// Leaf content. + Value(&'a [u8]), + /// Hash of the leaf content. + Hash(Hash), +} + +impl<'a, T: AsRef<[u8]>> From<&'a T> for Leaf<'a> { + fn from(v: &'a T) -> Self { + Leaf::Value(v.as_ref()) + } +} + +impl<'a> From for Leaf<'a> { + fn from(v: Hash) -> Self { + Leaf::Hash(v) + } +} + +/// Verify Merkle Proof correctness versus given root hash. +/// +/// The proof is NOT expected to contain leaf hash as the first +/// element, but only all adjacent nodes required to eventually by process of +/// concatenating and hashing end up with given root hash. +/// +/// The proof must not contain the root hash. 
+pub fn verify_proof<'a, H, P, L>( + root: &'a Hash, + proof: P, + number_of_leaves: usize, + leaf_index: usize, + leaf: L, +) -> bool +where + H: Hasher, + P: IntoIterator, + L: Into>, +{ + if leaf_index >= number_of_leaves { + return false + } + + let leaf_hash = match leaf.into() { + Leaf::Value(content) => H::hash(content), + Leaf::Hash(hash) => hash, + }; + + let mut combined = [0_u8; 64]; + let mut position = leaf_index; + let mut width = number_of_leaves; + let computed = proof.into_iter().fold(leaf_hash, |a, b| { + if position % 2 == 1 || position + 1 == width { + combined[0..32].copy_from_slice(&b); + combined[32..64].copy_from_slice(&a); + } else { + combined[0..32].copy_from_slice(&a); + combined[32..64].copy_from_slice(&b); + } + let hash = H::hash(&combined); + #[cfg(feature = "debug")] + log::debug!( + "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", + hex::encode(a), + hex::encode(b), + hex::encode(hash), + hex::encode(combined) + ); + position /= 2; + width = ((width - 1) / 2) + 1; + hash + }); + + root == &computed +} + +/// Processes a single row (layer) of a tree by taking pairs of elements, +/// concatenating them, hashing and placing into resulting vector. +/// +/// In case only one element is provided it is returned via `Ok` result, in any other case (also an +/// empty iterator) an `Err` with the inner nodes of upper layer is returned. 
+fn merkelize_row( + mut iter: I, + mut next: Vec, + visitor: &mut V, +) -> Result> +where + H: Hasher, + V: Visitor, + I: Iterator, +{ + #[cfg(feature = "debug")] + log::debug!("[merkelize_row]"); + next.clear(); + + let mut index = 0; + let mut combined = [0_u8; 64]; + loop { + let a = iter.next(); + let b = iter.next(); + visitor.visit(index, &a, &b); + + #[cfg(feature = "debug")] + log::debug!(" {:?}\n {:?}", a.as_ref().map(hex::encode), b.as_ref().map(hex::encode)); + + index += 2; + match (a, b) { + (Some(a), Some(b)) => { + combined[0..32].copy_from_slice(&a); + combined[32..64].copy_from_slice(&b); + + next.push(H::hash(&combined)); + }, + // Odd number of items. Promote the item to the upper layer. + (Some(a), None) if !next.is_empty() => { + next.push(a); + }, + // Last item = root. + (Some(a), None) => return Ok(a), + // Finish up, no more items. + _ => { + #[cfg(feature = "debug")] + log::debug!( + "[merkelize_row] Next: {:?}", + next.iter().map(hex::encode).collect::>() + ); + return Err(next) + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + fn should_generate_empty_root() { + // given + let _ = env_logger::try_init(); + let data: Vec<[u8; 1]> = Default::default(); + + // when + let out = merkle_root::(data); + + // then + assert_eq!( + hex::encode(&out), + "0000000000000000000000000000000000000000000000000000000000000000" + ); + } + + #[test] + fn should_generate_single_root() { + // given + let _ = env_logger::try_init(); + let data = vec![hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b")]; + + // when + let out = merkle_root::(data); + + // then + assert_eq!( + hex::encode(&out), + "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" + ); + } + + #[test] + fn should_generate_root_pow_2() { + // given + let _ = env_logger::try_init(); + let data = vec![ + hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), + hex!("25451A4de12dcCc2D166922fA938E900fCc4ED24"), + ]; + + // when + let out = 
merkle_root::(data); + + // then + assert_eq!( + hex::encode(&out), + "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" + ); + } + + #[test] + fn should_generate_root_complex() { + let _ = env_logger::try_init(); + let test = |root, data| { + assert_eq!(hex::encode(&merkle_root::(data)), root); + }; + + test( + "aff1208e69c9e8be9b584b07ebac4e48a1ee9d15ce3afe20b77a4d29e4175aa3", + vec!["a", "b", "c"], + ); + + test( + "b8912f7269068901f231a965adfefbc10f0eedcfa61852b103efd54dac7db3d7", + vec!["a", "b", "a"], + ); + + test( + "dc8e73fe6903148ff5079baecc043983625c23b39f31537e322cd0deee09fa9c", + vec!["a", "b", "a", "b"], + ); + + test( + "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239", + vec!["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], + ); + } + + #[test] + fn should_generate_and_verify_proof_simple() { + // given + let _ = env_logger::try_init(); + let data = vec!["a", "b", "c"]; + + // when + let proof0 = merkle_proof::(data.clone(), 0); + assert!(verify_proof::( + &proof0.root, + proof0.proof.clone(), + data.len(), + proof0.leaf_index, + &proof0.leaf, + )); + + let proof1 = merkle_proof::(data.clone(), 1); + assert!(verify_proof::( + &proof1.root, + proof1.proof, + data.len(), + proof1.leaf_index, + &proof1.leaf, + )); + + let proof2 = merkle_proof::(data.clone(), 2); + assert!(verify_proof::( + &proof2.root, + proof2.proof, + data.len(), + proof2.leaf_index, + &proof2.leaf + )); + + // then + assert_eq!(hex::encode(proof0.root), hex::encode(proof1.root)); + assert_eq!(hex::encode(proof2.root), hex::encode(proof1.root)); + + assert!(!verify_proof::( + &hex!("fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239"), + proof0.proof, + data.len(), + proof0.leaf_index, + &proof0.leaf + )); + + assert!(!verify_proof::( + &proof0.root, + vec![], + data.len(), + proof0.leaf_index, + &proof0.leaf + )); + } + + #[test] + fn should_generate_and_verify_proof_complex() { + // given + let _ = env_logger::try_init(); + let 
data = vec!["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]; + + for l in 0..data.len() { + // when + let proof = merkle_proof::(data.clone(), l); + // then + assert!(verify_proof::( + &proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + } + + #[test] + fn should_generate_and_verify_proof_large() { + // given + let _ = env_logger::try_init(); + let mut data = vec![]; + for i in 1..16 { + for c in 'a'..'z' { + if c as usize % i != 0 { + data.push(c.to_string()); + } + } + + for l in 0..data.len() { + // when + let proof = merkle_proof::(data.clone(), l); + // then + assert!(verify_proof::( + &proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + } + } + + #[test] + fn should_generate_and_verify_proof_large_tree() { + // given + let _ = env_logger::try_init(); + let mut data = vec![]; + for i in 0..6000 { + data.push(format!("{}", i)); + } + + for l in (0..data.len()).step_by(13) { + // when + let proof = merkle_proof::(data.clone(), l); + // then + assert!(verify_proof::( + &proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + } + + #[test] + #[should_panic] + fn should_panic_on_invalid_leaf_index() { + let _ = env_logger::try_init(); + merkle_proof::(vec!["a"], 5); + } + + #[test] + fn should_generate_and_verify_proof_on_test_data() { + let addresses = vec![ + "0x9aF1Ca5941148eB6A3e9b9C741b69738292C533f", + "0xDD6ca953fddA25c496165D9040F7F77f75B75002", + "0x60e9C47B64Bc1C7C906E891255EaEC19123E7F42", + "0xfa4859480Aa6D899858DE54334d2911E01C070df", + "0x19B9b128470584F7209eEf65B69F3624549Abe6d", + "0xC436aC1f261802C4494504A11fc2926C726cB83b", + "0xc304C8C2c12522F78aD1E28dD86b9947D7744bd0", + "0xDa0C2Cba6e832E55dE89cF4033affc90CC147352", + "0xf850Fd22c96e3501Aad4CDCBf38E4AEC95622411", + "0x684918D4387CEb5E7eda969042f036E226E50642", + "0x963F0A1bFbb6813C0AC88FcDe6ceB96EA634A595", + "0x39B38ad74b8bCc5CE564f7a27Ac19037A95B6099", + 
"0xC2Dec7Fdd1fef3ee95aD88EC8F3Cd5bd4065f3C7", + "0x9E311f05c2b6A43C2CCF16fB2209491BaBc2ec01", + "0x927607C30eCE4Ef274e250d0bf414d4a210b16f0", + "0x98882bcf85E1E2DFF780D0eB360678C1cf443266", + "0xFBb50191cd0662049E7C4EE32830a4Cc9B353047", + "0x963854fc2C358c48C3F9F0A598B9572c581B8DEF", + "0xF9D7Bc222cF6e3e07bF66711e6f409E51aB75292", + "0xF2E3fd32D063F8bBAcB9e6Ea8101C2edd899AFe6", + "0x407a5b9047B76E8668570120A96d580589fd1325", + "0xEAD9726FAFB900A07dAd24a43AE941d2eFDD6E97", + "0x42f5C8D9384034A9030313B51125C32a526b6ee8", + "0x158fD2529Bc4116570Eb7C80CC76FEf33ad5eD95", + "0x0A436EE2E4dEF3383Cf4546d4278326Ccc82514E", + "0x34229A215db8FeaC93Caf8B5B255e3c6eA51d855", + "0xEb3B7CF8B1840242CB98A732BA464a17D00b5dDF", + "0x2079692bf9ab2d6dc7D79BBDdEE71611E9aA3B72", + "0x46e2A67e5d450e2Cf7317779f8274a2a630f3C9B", + "0xA7Ece4A5390DAB18D08201aE18800375caD78aab", + "0x15E1c0D24D62057Bf082Cb2253dA11Ef0d469570", + "0xADDEF4C9b5687Eb1F7E55F2251916200A3598878", + "0xe0B16Fb96F936035db2b5A68EB37D470fED2f013", + "0x0c9A84993feaa779ae21E39F9793d09e6b69B62D", + "0x3bc4D5148906F70F0A7D1e2756572655fd8b7B34", + "0xFf4675C26903D5319795cbd3a44b109E7DDD9fDe", + "0xCec4450569A8945C6D2Aba0045e4339030128a92", + "0x85f0584B10950E421A32F471635b424063FD8405", + "0xb38bEe7Bdc0bC43c096e206EFdFEad63869929E3", + "0xc9609466274Fef19D0e58E1Ee3b321D5C141067E", + "0xa08EA868cF75268E7401021E9f945BAe73872ecc", + "0x67C9Cb1A29E964Fe87Ff669735cf7eb87f6868fE", + "0x1B6BEF636aFcdd6085cD4455BbcC93796A12F6E2", + "0x46B37b243E09540b55cF91C333188e7D5FD786dD", + "0x8E719E272f62Fa97da93CF9C941F5e53AA09e44a", + "0xa511B7E7DB9cb24AD5c89fBb6032C7a9c2EfA0a5", + "0x4D11FDcAeD335d839132AD450B02af974A3A66f8", + "0xB8cf790a5090E709B4619E1F335317114294E17E", + "0x7f0f57eA064A83210Cafd3a536866ffD2C5eDCB3", + "0xC03C848A4521356EF800e399D889e9c2A25D1f9E", + "0xC6b03DF05cb686D933DD31fCa5A993bF823dc4FE", + "0x58611696b6a8102cf95A32c25612E4cEF32b910F", + "0x2ed4bC7197AEF13560F6771D930Bf907772DE3CE", + 
"0x3C5E58f334306be029B0e47e119b8977B2639eb4", + "0x288646a1a4FeeC560B349d210263c609aDF649a6", + "0xb4F4981E0d027Dc2B3c86afA0D0fC03d317e83C0", + "0xaAE4A87F8058feDA3971f9DEd639Ec9189aA2500", + "0x355069DA35E598913d8736E5B8340527099960b8", + "0x3cf5A0F274cd243C0A186d9fCBdADad089821B93", + "0xca55155dCc4591538A8A0ca322a56EB0E4aD03C4", + "0xE824D0268366ec5C4F23652b8eD70D552B1F2b8B", + "0x84C3e9B25AE8a9b39FF5E331F9A597F2DCf27Ca9", + "0xcA0018e278751De10d26539915d9c7E7503432FE", + "0xf13077dE6191D6c1509ac7E088b8BE7Fe656c28b", + "0x7a6bcA1ec9Db506e47ac6FD86D001c2aBc59C531", + "0xeA7f9A2A9dd6Ba9bc93ca615C3Ddf26973146911", + "0x8D0d8577e16F8731d4F8712BAbFa97aF4c453458", + "0xB7a7855629dF104246997e9ACa0E6510df75d0ea", + "0x5C1009BDC70b0C8Ab2e5a53931672ab448C17c89", + "0x40B47D1AfefEF5eF41e0789F0285DE7b1C31631C", + "0x5086933d549cEcEB20652CE00973703CF10Da373", + "0xeb364f6FE356882F92ae9314fa96116Cf65F47d8", + "0xdC4D31516A416cEf533C01a92D9a04bbdb85EE67", + "0x9b36E086E5A274332AFd3D8509e12ca5F6af918d", + "0xBC26394fF36e1673aE0608ce91A53B9768aD0D76", + "0x81B5AB400be9e563fA476c100BE898C09966426c", + "0x9d93C8ae5793054D28278A5DE6d4653EC79e90FE", + "0x3B8E75804F71e121008991E3177fc942b6c28F50", + "0xC6Eb5886eB43dD473f5BB4e21e56E08dA464D9B4", + "0xfdf1277b71A73c813cD0e1a94B800f4B1Db66DBE", + "0xc2ff2cCc98971556670e287Ff0CC39DA795231ad", + "0x76b7E1473f0D0A87E9B4a14E2B179266802740f5", + "0xA7Bc965660a6EF4687CCa4F69A97563163A3C2Ef", + "0xB9C2b47888B9F8f7D03dC1de83F3F55E738CebD3", + "0xEd400162E6Dd6bD2271728FFb04176bF770De94a", + "0xE3E8331156700339142189B6E555DCb2c0962750", + "0xbf62e342Bc7706a448EdD52AE871d9C4497A53b1", + "0xb9d7A1A111eed75714a0AcD2dd467E872eE6B03D", + "0x03942919DFD0383b8c574AB8A701d89fd4bfA69D", + "0x0Ef4C92355D3c8c7050DFeb319790EFCcBE6fe9e", + "0xA6895a3cf0C60212a73B3891948ACEcF1753f25E", + "0x0Ed509239DB59ef3503ded3d31013C983d52803A", + "0xc4CE8abD123BfAFc4deFf37c7D11DeCd5c350EE4", + "0x4A4Bf59f7038eDcd8597004f35d7Ee24a7Bdd2d3", + 
"0x5769E8e8A2656b5ed6b6e6fa2a2bFAeaf970BB87", + "0xf9E15cCE181332F4F57386687c1776b66C377060", + "0xc98f8d4843D56a46C21171900d3eE538Cc74dbb5", + "0x3605965B47544Ce4302b988788B8195601AE4dEd", + "0xe993BDfdcAac2e65018efeE0F69A12678031c71d", + "0x274fDf8801385D3FAc954BCc1446Af45f5a8304c", + "0xBFb3f476fcD6429F4a475bA23cEFdDdd85c6b964", + "0x806cD16588Fe812ae740e931f95A289aFb4a4B50", + "0xa89488CE3bD9C25C3aF797D1bbE6CA689De79d81", + "0xd412f1AfAcf0Ebf3Cd324593A231Fc74CC488B12", + "0xd1f715b2D7951d54bc31210BbD41852D9BF98Ed1", + "0xf65aD707c344171F467b2ADba3d14f312219cE23", + "0x2971a4b242e9566dEF7bcdB7347f5E484E11919B", + "0x12b113D6827E07E7D426649fBd605f427da52314", + "0x1c6CA45171CDb9856A6C9Dba9c5F1216913C1e97", + "0x11cC6ee1d74963Db23294FCE1E3e0A0555779CeA", + "0x8Aa1C721255CDC8F895E4E4c782D86726b068667", + "0xA2cDC1f37510814485129aC6310b22dF04e9Bbf0", + "0xCf531b71d388EB3f5889F1f78E0d77f6fb109767", + "0xBe703e3545B2510979A0cb0C440C0Fba55c6dCB5", + "0x30a35886F989db39c797D8C93880180Fdd71b0c8", + "0x1071370D981F60c47A9Cd27ac0A61873a372cBB2", + "0x3515d74A11e0Cb65F0F46cB70ecf91dD1712daaa", + "0x50500a3c2b7b1229c6884505D00ac6Be29Aecd0C", + "0x9A223c2a11D4FD3585103B21B161a2B771aDA3d1", + "0xd7218df03AD0907e6c08E707B15d9BD14285e657", + "0x76CfD72eF5f93D1a44aD1F80856797fBE060c70a", + "0x44d093cB745944991EFF5cBa151AA6602d6f5420", + "0x626516DfF43bf09A71eb6fd1510E124F96ED0Cde", + "0x6530824632dfe099304E2DC5701cA99E6d031E08", + "0x57e6c423d6a7607160d6379A0c335025A14DaFC0", + "0x3966D4AD461Ef150E0B10163C81E79b9029E69c3", + "0xF608aCfd0C286E23721a3c347b2b65039f6690F1", + "0xbfB8FAac31A25646681936977837f7740fCd0072", + "0xd80aa634a623a7ED1F069a1a3A28a173061705c7", + "0x9122a77B36363e24e12E1E2D73F87b32926D3dF5", + "0x62562f0d1cD31315bCCf176049B6279B2bfc39C2", + "0x48aBF7A2a7119e5675059E27a7082ba7F38498b2", + "0xb4596983AB9A9166b29517acD634415807569e5F", + "0x52519D16E20BC8f5E96Da6d736963e85b2adA118", + "0x7663893C3dC0850EfC5391f5E5887eD723e51B83", + 
"0x5FF323a29bCC3B5b4B107e177EccEF4272959e61", + "0xee6e499AdDf4364D75c05D50d9344e9daA5A9AdF", + "0x1631b0BD31fF904aD67dD58994C6C2051CDe4E75", + "0xbc208e9723D44B9811C428f6A55722a26204eEF2", + "0xe76103a222Ee2C7Cf05B580858CEe625C4dc00E1", + "0xC71Bb2DBC51760f4fc2D46D84464410760971B8a", + "0xB4C18811e6BFe564D69E12c224FFc57351f7a7ff", + "0xD11DB0F5b41061A887cB7eE9c8711438844C298A", + "0xB931269934A3D4432c084bAAc3d0de8143199F4f", + "0x070037cc85C761946ec43ea2b8A2d5729908A2a1", + "0x2E34aa8C95Ffdbb37f14dCfBcA69291c55Ba48DE", + "0x052D93e8d9220787c31d6D83f87eC7dB088E998f", + "0x498dAC6C69b8b9ad645217050054840f1D91D029", + "0xE4F7D60f9d84301e1fFFd01385a585F3A11F8E89", + "0xEa637992f30eA06460732EDCBaCDa89355c2a107", + "0x4960d8Da07c27CB6Be48a79B96dD70657c57a6bF", + "0x7e471A003C8C9fdc8789Ded9C3dbe371d8aa0329", + "0xd24265Cc10eecb9e8d355CCc0dE4b11C556E74D7", + "0xDE59C8f7557Af779674f41CA2cA855d571018690", + "0x2fA8A6b3b6226d8efC9d8f6EBDc73Ca33DDcA4d8", + "0xe44102664c6c2024673Ff07DFe66E187Db77c65f", + "0x94E3f4f90a5f7CBF2cc2623e66B8583248F01022", + "0x0383EdBbc21D73DEd039E9C1Ff6bf56017b4CC40", + "0x64C3E49898B88d1E0f0d02DA23E0c00A2Cd0cA99", + "0xF4ccfB67b938d82B70bAb20975acFAe402E812E1", + "0x4f9ee5829e9852E32E7BC154D02c91D8E203e074", + "0xb006312eF9713463bB33D22De60444Ba95609f6B", + "0x7Cbe76ef69B52110DDb2e3b441C04dDb11D63248", + "0x70ADEEa65488F439392B869b1Df7241EF317e221", + "0x64C0bf8AA36Ba590477585Bc0D2BDa7970769463", + "0xA4cDc98593CE52d01Fe5Ca47CB3dA5320e0D7592", + "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", + ]; + let root = hex!("72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53"); + + let data = addresses + .into_iter() + .map(|address| hex::decode(&address[2..]).unwrap()) + .collect::>(); + + for l in 0..data.len() { + // when + let proof = merkle_proof::(data.clone(), l); + assert_eq!(hex::encode(&proof.root), hex::encode(&root)); + assert_eq!(proof.leaf_index, l); + assert_eq!(&proof.leaf, &data[l]); + + // then + assert!(verify_proof::( + 
&proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + + let proof = merkle_proof::(data.clone(), data.len() - 1); + + assert_eq!( + proof, + MerkleProof { + root, + proof: vec![ + hex!("340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f"), + hex!("ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f"), + hex!("d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79"), + hex!("ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e"), + ], + number_of_leaves: data.len(), + leaf_index: data.len() - 1, + leaf: hex!("c26B34D375533fFc4c5276282Fa5D660F3d8cbcB").to_vec(), + } + ); + } +} diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs new file mode 100644 index 0000000000000..001831639b169 --- /dev/null +++ b/frame/beefy-mmr/src/lib.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +//! A BEEFY+MMR pallet combo. +//! +//! While both BEEFY and Merkle Mountain Range (MMR) can be used separately, +//! these tools were designed to work together in unison. +//! +//! The pallet provides a standardized MMR Leaf format that is can be used +//! to bridge BEEFY+MMR-based networks (both standalone and polkadot-like). +//! +//! The MMR leaf contains: +//! 1. 
Block number and parent block hash. +//! 2. Merkle Tree Root Hash of next BEEFY validator set. +//! 3. Merkle Tree Root Hash of current parachain heads state. +//! +//! and thanks to versioning can be easily updated in the future. + +use sp_runtime::traits::{Convert, Hash}; +use sp_std::prelude::*; + +use beefy_primitives::mmr::{BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}; +use pallet_mmr::primitives::LeafDataProvider; + +use codec::Encode; +use frame_support::traits::Get; + +pub use pallet::*; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +/// A BEEFY consensus digest item with MMR root hash. +pub struct DepositBeefyDigest(sp_std::marker::PhantomData); + +impl pallet_mmr::primitives::OnNewRoot for DepositBeefyDigest +where + T: pallet_mmr::Config, + T: pallet_beefy::Config, +{ + fn on_new_root(root: &::Hash) { + let digest = sp_runtime::generic::DigestItem::Consensus( + beefy_primitives::BEEFY_ENGINE_ID, + codec::Encode::encode(&beefy_primitives::ConsensusLog::< + ::BeefyId, + >::MmrRoot(*root)), + ); + >::deposit_log(digest); + } +} + +/// Convert BEEFY secp256k1 public keys into Ethereum addresses +pub struct BeefyEcdsaToEthereum; +impl Convert> for BeefyEcdsaToEthereum { + fn convert(a: beefy_primitives::crypto::AuthorityId) -> Vec { + use sp_core::crypto::Public; + let compressed_key = a.as_slice(); + + libsecp256k1::PublicKey::parse_slice( + compressed_key, + Some(libsecp256k1::PublicKeyFormat::Compressed), + ) + // uncompress the key + .map(|pub_key| pub_key.serialize().to_vec()) + // now convert to ETH address + .map(|uncompressed| sp_io::hashing::keccak_256(&uncompressed[1..])[12..].to_vec()) + .map_err(|_| { + log::error!(target: "runtime::beefy", "Invalid BEEFY PublicKey format!"); + }) + .unwrap_or_default() + } +} + +type MerkleRootOf = ::Hash; +type ParaId = u32; +type ParaHead = Vec; + +/// A type that is able to return current list of parachain heads that end up in the MMR leaf. 
+pub trait ParachainHeadsProvider { + /// Return a list of tuples containing a `ParaId` and Parachain Header data (ParaHead). + /// + /// The returned data does not have to be sorted. + fn parachain_heads() -> Vec<(ParaId, ParaHead)>; +} + +/// A default implementation for runtimes without parachains. +impl ParachainHeadsProvider for () { + fn parachain_heads() -> Vec<(ParaId, ParaHead)> { + Default::default() + } +} + +#[frame_support::pallet] +pub mod pallet { + #![allow(missing_docs)] + + use super::*; + use frame_support::pallet_prelude::*; + + /// BEEFY-MMR pallet. + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// The module's configuration trait. + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: pallet_mmr::Config + pallet_beefy::Config { + /// Current leaf version. + /// + /// Specifies the version number added to every leaf that get's appended to the MMR. + /// Read more in [`MmrLeafVersion`] docs about versioning leaves. + type LeafVersion: Get; + + /// Convert BEEFY AuthorityId to a form that would end up in the Merkle Tree. + /// + /// For instance for ECDSA (secp256k1) we want to store uncompressed public keys (65 bytes) + /// and later to Ethereum Addresses (160 bits) to simplify using them on Ethereum chain, + /// but the rest of the Substrate codebase is storing them compressed (33 bytes) for + /// efficiency reasons. + type BeefyAuthorityToMerkleLeaf: Convert<::BeefyId, Vec>; + + /// Retrieve a list of current parachain heads. + /// + /// The trait is implemented for `paras` module, but since not all chains might have + /// parachains, and we want to keep the MMR leaf structure uniform, it's possible to use + /// `()` as well to simply put dummy data to the leaf. + type ParachainHeads: ParachainHeadsProvider; + } + + /// Details of next BEEFY authority set. + /// + /// This storage entry is used as cache for calls to [`update_beefy_next_authority_set`]. 
+ #[pallet::storage] + #[pallet::getter(fn beefy_next_authorities)] + pub type BeefyNextAuthorities = + StorageValue<_, BeefyNextAuthoritySet>, ValueQuery>; +} + +impl LeafDataProvider for Pallet +where + MerkleRootOf: From + Into, +{ + type LeafData = MmrLeaf< + ::BlockNumber, + ::Hash, + MerkleRootOf, + >; + + fn leaf_data() -> Self::LeafData { + MmrLeaf { + version: T::LeafVersion::get(), + parent_number_and_hash: frame_system::Pallet::::leaf_data(), + parachain_heads: Pallet::::parachain_heads_merkle_root(), + beefy_next_authority_set: Pallet::::update_beefy_next_authority_set(), + } + } +} + +impl beefy_merkle_tree::Hasher for Pallet +where + MerkleRootOf: Into, +{ + fn hash(data: &[u8]) -> beefy_merkle_tree::Hash { + ::Hashing::hash(data).into() + } +} + +impl Pallet +where + MerkleRootOf: From + Into, +{ + /// Returns latest root hash of a merkle tree constructed from all active parachain headers. + /// + /// The leafs are sorted by `ParaId` to allow more efficient lookups and non-existence proofs. + /// + /// NOTE this does not include parathreads - only parachains are part of the merkle tree. + /// + /// NOTE This is an initial and inefficient implementation, which re-constructs + /// the merkle tree every block. Instead we should update the merkle root in + /// [Self::on_initialize] call of this pallet and update the merkle tree efficiently (use + /// on-chain storage to persist inner nodes). + fn parachain_heads_merkle_root() -> MerkleRootOf { + let mut para_heads = T::ParachainHeads::parachain_heads(); + para_heads.sort(); + let para_heads = para_heads.into_iter().map(|pair| pair.encode()); + beefy_merkle_tree::merkle_root::(para_heads).into() + } + + /// Returns details of the next BEEFY authority set. + /// + /// Details contain authority set id, authority set length and a merkle root, + /// constructed from uncompressed secp256k1 public keys converted to Ethereum addresses + /// of the next BEEFY authority set. 
+ /// + /// This function will use a storage-cached entry in case the set didn't change, or compute and + /// cache new one in case it did. + fn update_beefy_next_authority_set() -> BeefyNextAuthoritySet> { + let id = pallet_beefy::Pallet::::validator_set_id() + 1; + let current_next = Self::beefy_next_authorities(); + // avoid computing the merkle tree if validator set id didn't change. + if id == current_next.id { + return current_next + } + + let beefy_addresses = pallet_beefy::Pallet::::next_authorities() + .into_iter() + .map(T::BeefyAuthorityToMerkleLeaf::convert) + .collect::>(); + let len = beefy_addresses.len() as u32; + let root = beefy_merkle_tree::merkle_root::(beefy_addresses).into(); + let next_set = BeefyNextAuthoritySet { id, len, root }; + // cache the result + BeefyNextAuthorities::::put(&next_set); + next_set + } +} diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs new file mode 100644 index 0000000000000..4c9e103eb7b82 --- /dev/null +++ b/frame/beefy-mmr/src/mock.rs @@ -0,0 +1,206 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::vec; + +use beefy_primitives::mmr::MmrLeafVersion; +use frame_support::{ + construct_runtime, parameter_types, sp_io::TestExternalities, traits::GenesisBuild, + BasicExternalities, +}; +use sp_core::{Hasher, H256}; +use sp_runtime::{ + app_crypto::ecdsa::Public, + impl_opaque_keys, + testing::Header, + traits::{BlakeTwo256, ConvertInto, IdentityLookup, Keccak256, OpaqueKeys}, + Perbill, +}; + +use crate as pallet_beefy_mmr; + +pub use beefy_primitives::{crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; + +impl_opaque_keys! { + pub struct MockSessionKeys { + pub dummy: pallet_beefy::Pallet, + } +} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Mmr: pallet_mmr::{Pallet, Storage}, + Beefy: pallet_beefy::{Pallet, Config, Storage}, + BeefyMmr: pallet_beefy_mmr::{Pallet, Storage}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); +} + +parameter_types! 
{ + pub const Period: u64 = 1; + pub const Offset: u64 = 0; + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); +} + +impl pallet_session::Config for Test { + type Event = Event; + type ValidatorId = u64; + type ValidatorIdOf = ConvertInto; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = MockSessionManager; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = MockSessionKeys; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); +} + +pub type MmrLeaf = beefy_primitives::mmr::MmrLeaf< + ::BlockNumber, + ::Hash, + ::Hash, +>; + +impl pallet_mmr::Config for Test { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + + type Hashing = Keccak256; + + type Hash = ::Out; + + type LeafData = BeefyMmr; + + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + + type WeightInfo = (); +} + +impl pallet_beefy::Config for Test { + type BeefyId = BeefyId; +} + +parameter_types! { + pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(1, 5); +} + +impl pallet_beefy_mmr::Config for Test { + type LeafVersion = LeafVersion; + + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + + type ParachainHeads = DummyParaHeads; +} + +pub struct DummyParaHeads; +impl pallet_beefy_mmr::ParachainHeadsProvider for DummyParaHeads { + fn parachain_heads() -> Vec<(pallet_beefy_mmr::ParaId, pallet_beefy_mmr::ParaHead)> { + vec![(15, vec![1, 2, 3]), (5, vec![4, 5, 6])] + } +} + +pub struct MockSessionManager; +impl pallet_session::SessionManager for MockSessionManager { + fn end_session(_: sp_staking::SessionIndex) {} + fn start_session(_: sp_staking::SessionIndex) {} + fn new_session(idx: sp_staking::SessionIndex) -> Option> { + if idx == 0 || idx == 1 { + Some(vec![1, 2]) + } else if idx == 2 { + Some(vec![3, 4]) + } else { + None + } + } +} + +// Note, that we can't use `UintAuthorityId` here. 
Reason is that the implementation +// of `to_public_key()` assumes, that a public key is 32 bytes long. This is true for +// ed25519 and sr25519 but *not* for ecdsa. An ecdsa public key is 33 bytes. +pub fn mock_beefy_id(id: u8) -> BeefyId { + let buf: [u8; 33] = [id; 33]; + let pk = Public::from_raw(buf); + BeefyId::from(pk) +} + +pub fn mock_authorities(vec: Vec) -> Vec<(u64, BeefyId)> { + vec.into_iter().map(|id| ((id as u64), mock_beefy_id(id))).collect() +} + +pub fn new_test_ext(ids: Vec) -> TestExternalities { + new_test_ext_raw_authorities(mock_authorities(ids)) +} + +pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let session_keys: Vec<_> = authorities + .iter() + .enumerate() + .map(|(_, id)| (id.0 as u64, id.0 as u64, MockSessionKeys { dummy: id.1.clone() })) + .collect(); + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref id, ..) in &session_keys { + frame_system::Pallet::::inc_providers(id); + } + }); + + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + t.into() +} diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs new file mode 100644 index 0000000000000..7c70766623b4d --- /dev/null +++ b/frame/beefy-mmr/src/tests.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::vec; + +use beefy_primitives::{ + mmr::{BeefyNextAuthoritySet, MmrLeafVersion}, + ValidatorSet, +}; +use codec::{Decode, Encode}; +use hex_literal::hex; + +use sp_core::H256; +use sp_io::TestExternalities; +use sp_runtime::{traits::Keccak256, DigestItem}; + +use frame_support::traits::OnInitialize; + +use crate::mock::*; + +fn init_block(block: u64) { + System::set_block_number(block); + Session::on_initialize(block); + Mmr::on_initialize(block); + Beefy::on_initialize(block); + BeefyMmr::on_initialize(block); +} + +pub fn beefy_log(log: ConsensusLog) -> DigestItem { + DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) +} + +fn offchain_key(pos: usize) -> Vec { + (::INDEXING_PREFIX, pos as u64).encode() +} + +fn read_mmr_leaf(ext: &mut TestExternalities, index: usize) -> MmrLeaf { + type Node = pallet_mmr_primitives::DataOrHash; + ext.persist_offchain_overlay(); + let offchain_db = ext.offchain_db(); + offchain_db + .get(&offchain_key(index)) + .map(|d| Node::decode(&mut &*d).unwrap()) + .map(|n| match n { + Node::Data(d) => d, + _ => panic!("Unexpected MMR node."), + }) + .unwrap() +} + +#[test] +fn should_contain_mmr_digest() { + let mut ext = new_test_ext(vec![1, 2, 3, 4]); + ext.execute_with(|| { + init_block(1); + + assert_eq!( + System::digest().logs, + vec![beefy_log(ConsensusLog::MmrRoot( + hex!("f3e3afbfa69e89cd1e99f8d3570155962f3346d1d8758dc079be49ef70387758").into() + ))] + ); + + // unique every time + init_block(2); + + assert_eq!( + System::digest().logs, + vec![ + beefy_log(ConsensusLog::MmrRoot( + hex!("f3e3afbfa69e89cd1e99f8d3570155962f3346d1d8758dc079be49ef70387758").into() + )), + beefy_log(ConsensusLog::AuthoritiesChange(ValidatorSet { + validators: vec![mock_beefy_id(3), mock_beefy_id(4),], + id: 1, + })), + beefy_log(ConsensusLog::MmrRoot( + hex!("7d4ae4524bae75d52b63f08eab173b0c263eb95ae2c55c3a1d871241bd0cc559").into() + 
)), + ] + ); + }); +} + +#[test] +fn should_contain_valid_leaf_data() { + let mut ext = new_test_ext(vec![1, 2, 3, 4]); + ext.execute_with(|| { + init_block(1); + }); + + let mmr_leaf = read_mmr_leaf(&mut ext, 0); + assert_eq!( + mmr_leaf, + MmrLeaf { + version: MmrLeafVersion::new(1, 5), + parent_number_and_hash: (0_u64, H256::repeat_byte(0x45)), + beefy_next_authority_set: BeefyNextAuthoritySet { + id: 1, + len: 2, + root: hex!("01b1a742589773fc054c8f5021a456316ffcec0370b25678b0696e116d1ef9ae") + .into(), + }, + parachain_heads: hex!( + "ed893c8f8cc87195a5d4d2805b011506322036bcace79642aa3e94ab431e442e" + ) + .into(), + } + ); + + // build second block on top + ext.execute_with(|| { + init_block(2); + }); + + let mmr_leaf = read_mmr_leaf(&mut ext, 1); + assert_eq!( + mmr_leaf, + MmrLeaf { + version: MmrLeafVersion::new(1, 5), + parent_number_and_hash: (1_u64, H256::repeat_byte(0x45)), + beefy_next_authority_set: BeefyNextAuthoritySet { + id: 2, + len: 2, + root: hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5") + .into(), + }, + parachain_heads: hex!( + "ed893c8f8cc87195a5d4d2805b011506322036bcace79642aa3e94ab431e442e" + ) + .into(), + } + ); +} diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml new file mode 100644 index 0000000000000..e5af666e7ca54 --- /dev/null +++ b/frame/beefy/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "pallet-beefy" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" + +[dependencies] +codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.130", optional = true } + +frame-support = { version = "4.0.0-dev", path = "../support", default-features = false } +frame-system = { version = "4.0.0-dev", path = "../system", default-features = false } + +sp-runtime = { version = "4.0.0-dev", path = 
"../../primitives/runtime", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../../primitives/std", default-features = false } + +pallet-session = { version = "4.0.0-dev", path = "../session", default-features = false } + +beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy", default-features = false } + +[dev-dependencies] +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "serde", + "beefy-primitives/std", + "frame-support/std", + "frame-system/std", + "sp-runtime/std", + "sp-std/std", + "pallet-session/std", +] diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs new file mode 100644 index 0000000000000..32f3133373432 --- /dev/null +++ b/frame/beefy/src/lib.rs @@ -0,0 +1,179 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Encode; + +use frame_support::{traits::OneSessionHandler, Parameter}; + +use sp_runtime::{ + generic::DigestItem, + traits::{IsMember, Member}, + RuntimeAppPublic, +}; +use sp_std::prelude::*; + +use beefy_primitives::{AuthorityIndex, ConsensusLog, ValidatorSet, BEEFY_ENGINE_ID}; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Authority identifier type + type BeefyId: Member + Parameter + RuntimeAppPublic + Default + MaybeSerializeDeserialize; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + /// The current authorities set + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub(super) type Authorities = StorageValue<_, Vec, ValueQuery>; + + /// The current validator set id + #[pallet::storage] + #[pallet::getter(fn validator_set_id)] + pub(super) type ValidatorSetId = + StorageValue<_, beefy_primitives::ValidatorSetId, ValueQuery>; + + /// Authorities set scheduled to be used with the next session + #[pallet::storage] + #[pallet::getter(fn next_authorities)] + pub(super) type NextAuthorities = StorageValue<_, Vec, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub authorities: Vec, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { authorities: Vec::new() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_authorities(&self.authorities); + } + } +} + +impl Pallet { + /// Return the current active BEEFY validator set. 
+ pub fn validator_set() -> ValidatorSet { + ValidatorSet:: { validators: Self::authorities(), id: Self::validator_set_id() } + } + + fn change_authorities(new: Vec, queued: Vec) { + // As in GRANDPA, we trigger a validator set change only if the the validator + // set has actually changed. + if new != Self::authorities() { + >::put(&new); + + let next_id = Self::validator_set_id() + 1u64; + >::put(next_id); + + let log: DigestItem = DigestItem::Consensus( + BEEFY_ENGINE_ID, + ConsensusLog::AuthoritiesChange(ValidatorSet { validators: new, id: next_id }) + .encode(), + ); + >::deposit_log(log); + } + + >::put(&queued); + } + + fn initialize_authorities(authorities: &[T::BeefyId]) { + if authorities.is_empty() { + return + } + + assert!(>::get().is_empty(), "Authorities are already initialized!"); + + >::put(authorities); + >::put(0); + // Like `pallet_session`, initialize the next validator set as well. + >::put(authorities); + } +} + +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { + type Public = T::BeefyId; +} + +impl OneSessionHandler for Pallet { + type Key = T::BeefyId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_, k)| k).collect::>(); + Self::initialize_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + if changed { + let next_authorities = validators.map(|(_, k)| k).collect::>(); + let next_queued_authorities = queued_validators.map(|(_, k)| k).collect::>(); + + Self::change_authorities(next_authorities, next_queued_authorities); + } + } + + fn on_disabled(i: usize) { + let log: DigestItem = DigestItem::Consensus( + BEEFY_ENGINE_ID, + ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), + ); + + >::deposit_log(log); + } +} + +impl IsMember for Pallet { + fn is_member(authority_id: &T::BeefyId) -> bool { + Self::authorities().iter().any(|id| id == authority_id) + } +} diff --git 
a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs new file mode 100644 index 0000000000000..baa2fae746fe3 --- /dev/null +++ b/frame/beefy/src/mock.rs @@ -0,0 +1,165 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::vec; + +use frame_support::{ + construct_runtime, parameter_types, sp_io::TestExternalities, traits::GenesisBuild, + BasicExternalities, +}; +use sp_core::H256; +use sp_runtime::{ + app_crypto::ecdsa::Public, + impl_opaque_keys, + testing::Header, + traits::{BlakeTwo256, ConvertInto, IdentityLookup, OpaqueKeys}, + Perbill, +}; + +use crate as pallet_beefy; + +pub use beefy_primitives::{crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; + +impl_opaque_keys! { + pub struct MockSessionKeys { + pub dummy: pallet_beefy::Pallet, + } +} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Beefy: pallet_beefy::{Pallet, Call, Config, Storage}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); +} + +impl pallet_beefy::Config for Test { + type BeefyId = BeefyId; +} + +parameter_types! { + pub const Period: u64 = 1; + pub const Offset: u64 = 0; + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); +} + +impl pallet_session::Config for Test { + type Event = Event; + type ValidatorId = u64; + type ValidatorIdOf = ConvertInto; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = MockSessionManager; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = MockSessionKeys; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); +} + +pub struct MockSessionManager; + +impl pallet_session::SessionManager for MockSessionManager { + fn end_session(_: sp_staking::SessionIndex) {} + fn start_session(_: sp_staking::SessionIndex) {} + fn new_session(idx: sp_staking::SessionIndex) -> Option> { + if idx == 0 || idx == 1 { + Some(vec![1, 2]) + } else if idx == 2 { + Some(vec![3, 4]) + } else { + None + } + } +} + +// Note, that we can't use `UintAuthorityId` here. Reason is that the implementation +// of `to_public_key()` assumes, that a public key is 32 bytes long. 
This is true for +// ed25519 and sr25519 but *not* for ecdsa. An ecdsa public key is 33 bytes. +pub fn mock_beefy_id(id: u8) -> BeefyId { + let buf: [u8; 33] = [id; 33]; + let pk = Public::from_raw(buf); + BeefyId::from(pk) +} + +pub fn mock_authorities(vec: Vec) -> Vec<(u64, BeefyId)> { + vec.into_iter().map(|id| ((id as u64), mock_beefy_id(id))).collect() +} + +pub fn new_test_ext(ids: Vec) -> TestExternalities { + new_test_ext_raw_authorities(mock_authorities(ids)) +} + +pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let session_keys: Vec<_> = authorities + .iter() + .enumerate() + .map(|(_, id)| (id.0 as u64, id.0 as u64, MockSessionKeys { dummy: id.1.clone() })) + .collect(); + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref id, ..) in &session_keys { + frame_system::Pallet::::inc_providers(id); + } + }); + + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + t.into() +} diff --git a/frame/beefy/src/tests.rs b/frame/beefy/src/tests.rs new file mode 100644 index 0000000000000..24f9acaf76bfc --- /dev/null +++ b/frame/beefy/src/tests.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::vec; + +use beefy_primitives::ValidatorSet; +use codec::Encode; + +use sp_core::H256; +use sp_runtime::DigestItem; + +use frame_support::traits::OnInitialize; + +use crate::mock::*; + +fn init_block(block: u64) { + System::set_block_number(block); + Session::on_initialize(block); +} + +pub fn beefy_log(log: ConsensusLog) -> DigestItem { + DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) +} + +#[test] +fn genesis_session_initializes_authorities() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + let authorities = Beefy::authorities(); + + assert!(authorities.len() == 2); + assert_eq!(want[0], authorities[0]); + assert_eq!(want[1], authorities[1]); + + assert!(Beefy::validator_set_id() == 0); + + let next_authorities = Beefy::next_authorities(); + + assert!(next_authorities.len() == 2); + assert_eq!(want[0], next_authorities[0]); + assert_eq!(want[1], next_authorities[1]); + }); +} + +#[test] +fn session_change_updates_authorities() { + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + init_block(1); + + assert!(0 == Beefy::validator_set_id()); + + // no change - no log + assert!(System::digest().logs.is_empty()); + + init_block(2); + + assert!(1 == Beefy::validator_set_id()); + + let want = beefy_log(ConsensusLog::AuthoritiesChange(ValidatorSet { + validators: vec![mock_beefy_id(3), mock_beefy_id(4)], + id: 1, + })); + + let log = System::digest().logs[0].clone(); + + assert_eq!(want, log); + }); +} + +#[test] +fn session_change_updates_next_authorities() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + init_block(1); + + let next_authorities = Beefy::next_authorities(); + + assert!(next_authorities.len() == 2); + assert_eq!(want[0], next_authorities[0]); + assert_eq!(want[1], next_authorities[1]); + + init_block(2); + + let next_authorities = 
Beefy::next_authorities(); + + assert!(next_authorities.len() == 2); + assert_eq!(want[2], next_authorities[0]); + assert_eq!(want[3], next_authorities[1]); + }); +} + +#[test] +fn validator_set_at_genesis() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + let vs = Beefy::validator_set(); + + assert_eq!(vs.id, 0u64); + assert_eq!(vs.validators[0], want[0]); + assert_eq!(vs.validators[1], want[1]); + }); +} + +#[test] +fn validator_set_updates_work() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + init_block(1); + + let vs = Beefy::validator_set(); + + assert_eq!(vs.id, 0u64); + assert_eq!(want[0], vs.validators[0]); + assert_eq!(want[1], vs.validators[1]); + + init_block(2); + + let vs = Beefy::validator_set(); + + assert_eq!(vs.id, 1u64); + assert_eq!(want[2], vs.validators[0]); + assert_eq!(want[3], vs.validators[1]); + }); +} diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 6c124a8a75761..1805424426f6e 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -131,6 +131,13 @@ macro_rules! whitelist { /// let c = 0 .. 10 => setup_c_in_some_other_way(&caller, c); /// }: baz(Origin::Signed(caller)) /// +/// // You may optionally specify the origin type if it can't be determined automatically like +/// // this. +/// baz3 { +/// let caller = account::(b"caller", 0, benchmarks_seed); +/// let l in 1 .. MAX_LENGTH => initialize_l(l); +/// }: baz(Origin::Signed(caller), vec![0u8; l]) +/// /// // this is benchmarking some code that is not a dispatchable. /// populate_a_set { /// let x in 0 .. 10_000; @@ -148,6 +155,12 @@ macro_rules! whitelist { /// benchmark just like a regular benchmark, but only testing at the lowest and highest values for /// each component. The function will return `Ok(())` if the benchmarks return no errors. 
/// +/// It is also possible to generate one #[test] function per benchmark by calling the +/// `impl_benchmark_test_suite` macro inside the `benchmarks` block. The functions will be named +/// `bench_` and can be run via `cargo test`. +/// You will see one line of output per benchmark. This approach will give you more understandable +/// error messages and allows for parallel benchmark execution. +/// /// You can optionally add a `verify` code block at the end of a benchmark to test any final state /// of your benchmark in a unit test. For example: /// @@ -167,7 +180,8 @@ macro_rules! whitelist { /// /// These `verify` blocks will not affect your benchmark results! /// -/// You can construct benchmark tests like so: +/// You can construct benchmark by using the `impl_benchmark_test_suite` macro or +/// by manually implementing them like so: /// /// ```ignore /// #[test] @@ -186,6 +200,7 @@ macro_rules! benchmarks { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { } { } { } ( ) @@ -205,6 +220,7 @@ macro_rules! benchmarks_instance { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { } { I: Instance } { } ( ) @@ -224,6 +240,7 @@ macro_rules! benchmarks_instance_pallet { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { } { I: 'static } { } ( ) @@ -237,8 +254,60 @@ macro_rules! benchmarks_instance_pallet { #[macro_export] #[doc(hidden)] macro_rules! benchmarks_iter { + // detect and extract `impl_benchmark_test_suite` call: + // - with a semi-colon + ( + { } + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + impl_benchmark_test_suite!( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $args:tt )* )?); + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $bench_module, $new_test_ext, $test $(, $( $args )* )? } + { $( $instance: $instance_bound )? 
} + { $( $where_clause )* } + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $( $rest )* + } + }; + // - without a semicolon + ( + { } + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + impl_benchmark_test_suite!( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $args:tt )* )?) + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $bench_module, $new_test_ext, $test $(, $( $args )* )? } + { $( $instance: $instance_bound )? } + { $( $where_clause )* } + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $( $rest )* + } + }; // detect and extract where clause: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -248,6 +317,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound)? } { $( $where_bound )* } ( $( $names )* ) @@ -258,6 +328,7 @@ macro_rules! benchmarks_iter { }; // detect and extract `#[skip_meta]` tag: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -268,6 +339,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -277,8 +349,9 @@ macro_rules! benchmarks_iter { $( $rest )* } }; - // detect and extract `#[extra] tag: + // detect and extract `#[extra]` tag: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? 
} { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -289,6 +362,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -300,39 +374,43 @@ macro_rules! benchmarks_iter { }; // mutation arm: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) // This contains $( $( { $instance } )? $name:ident )* ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: _ $(< $origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $name ( $origin $( , $arg )* ) + $name { $( $code )* }: $name $(< $origin_type >)? ( $origin $( , $arg )* ) verify $postcode $( $rest )* } }; // mutation arm: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { $crate::paste::paste! { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -350,15 +428,14 @@ macro_rules! 
benchmarks_iter { &__call ); }: { - let call_decoded = < + let __call_decoded = < Call as $crate::frame_support::codec::Decode >::decode(&mut &__benchmarked_call_encoded[..]) .expect("call is encoded above, encoding must be correct"); - - < - Call as $crate::frame_support::traits::UnfilteredDispatchable - >::dispatch_bypass_filter(call_decoded, $origin.into())?; + let __origin = $crate::to_origin!($origin $(, $origin_type)?); + as $crate::frame_support::traits::UnfilteredDispatchable + >::dispatch_bypass_filter(__call_decoded, __origin)?; } verify $postcode $( $rest )* @@ -367,6 +444,7 @@ macro_rules! benchmarks_iter { }; // iteration arm: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -394,6 +472,7 @@ macro_rules! benchmarks_iter { ); $crate::benchmarks_iter!( + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* { $( $instance )? } $name ) @@ -402,8 +481,40 @@ macro_rules! benchmarks_iter { $( $rest )* ); }; - // iteration-exit arm + // iteration-exit arm which generates a #[test] function for each case. ( + { $bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )? } + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + ) => { + $crate::selected_benchmark!( + { $( $where_clause)* } + { $( $instance: $instance_bound )? } + $( $names )* + ); + $crate::impl_benchmark!( + { $( $where_clause )* } + { $( $instance: $instance_bound )? } + ( $( $names )* ) + ( $( $names_extra ),* ) + ( $( $names_skip_meta ),* ) + ); + $crate::impl_test_function!( + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $bench_module, + $new_test_ext, + $test + $(, $( $args )* )? 
+ ); + }; + // iteration-exit arm which doesn't generate a #[test] function for all cases. + ( + { } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -425,6 +536,7 @@ macro_rules! benchmarks_iter { }; // add verify block to _() format ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -434,6 +546,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -446,6 +559,7 @@ macro_rules! benchmarks_iter { }; // add verify block to name() format ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -455,6 +569,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -467,6 +582,7 @@ macro_rules! benchmarks_iter { }; // add verify block to {} format ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -476,6 +592,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -488,6 +605,17 @@ macro_rules! benchmarks_iter { }; } +#[macro_export] +#[doc(hidden)] +macro_rules! to_origin { + ($origin:expr) => { + $origin.into() + }; + ($origin:expr, $origin_type:ty) => { + >::from($origin) + }; +} + #[macro_export] #[doc(hidden)] macro_rules! 
benchmark_backend { @@ -678,6 +806,100 @@ macro_rules! benchmark_backend { }; } +// Creates #[test] functions for the given bench cases. +#[macro_export] +#[doc(hidden)] +macro_rules! impl_bench_case_tests { + ( + { $module:ident, $new_test_exec:expr, $exec_name:ident, $test:path, $extra:expr } + { $( $names_extra:tt )* } + $( { $( $bench_inst:ident )? } $bench:ident )* + ) + => { + $crate::impl_bench_name_tests!( + $module, $new_test_exec, $exec_name, $test, $extra, + { $( $names_extra )* }, + $( { $bench } )+ + ); + } +} + +// Creates a #[test] function for the given bench name. +#[macro_export] +#[doc(hidden)] +macro_rules! impl_bench_name_tests { + // recursion anchor + ( + $module:ident, $new_test_exec:expr, $exec_name:ident, $test:path, $extra:expr, + { $( $names_extra:tt )* }, + { $name:ident } + ) => { + $crate::paste::paste! { + #[test] + fn [] () { + $new_test_exec.$exec_name(|| { + // Skip all #[extra] benchmarks if $extra is false. + if !($extra) { + let disabled = $crate::vec![ $( stringify!($names_extra).as_ref() ),* ]; + if disabled.contains(&stringify!($name)) { + $crate::log::error!( + "INFO: extra benchmark skipped - {}", + stringify!($name), + ); + return (); + } + } + + // Same per-case logic as when all cases are run in the + // same function. + match std::panic::catch_unwind(|| { + $module::<$test>::[< test_benchmark_ $name >] () + }) { + Err(err) => { + panic!("{}: {:?}", stringify!($name), err); + }, + Ok(Err(err)) => { + match err { + $crate::BenchmarkError::Stop(err) => { + panic!("{}: {:?}", stringify!($name), err); + }, + $crate::BenchmarkError::Override(_) => { + // This is still considered a success condition. + $crate::log::error!( + "WARNING: benchmark error overrided - {}", + stringify!($name), + ); + }, + $crate::BenchmarkError::Skip => { + // This is considered a success condition. 
+ $crate::log::error!( + "WARNING: benchmark error skipped - {}", + stringify!($name), + ); + } + } + }, + Ok(Ok(())) => (), + } + }); + } + } + }; + // recursion tail + ( + $module:ident, $new_test_exec:expr, $exec_name:ident, $test:path, $extra:expr, + { $( $names_extra:tt )* }, + { $name:ident } $( { $rest:ident } )+ + ) => { + // car + $crate::impl_bench_name_tests!($module, $new_test_exec, $exec_name, $test, $extra, + { $( $names_extra )* }, { $name }); + // cdr + $crate::impl_bench_name_tests!($module, $new_test_exec, $exec_name, $test, $extra, + { $( $names_extra )* }, $( { $rest } )+); + }; +} + // Creates a `SelectedBenchmark` enum implementing `BenchmarkingSetup`. // // Every variant must implement [`BenchmarkingSetup`]. @@ -1013,13 +1235,54 @@ macro_rules! impl_benchmark_test { /// new_test_ext().execute_with(|| { /// assert_ok!(test_benchmark_accumulate_dummy::()); /// assert_ok!(test_benchmark_set_dummy::()); -/// assert_ok!(test_benchmark_another_set_dummy::()); /// assert_ok!(test_benchmark_sort_vector::()); /// }); /// } /// } /// ``` /// +/// When called inside the `benchmarks` macro of the `pallet_example` as +/// +/// ```rust,ignore +/// benchmarks! 
{ +/// // Benchmarks omitted for brevity +/// +/// impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +/// } +/// ``` +/// +/// It expands to the equivalent of: +/// +/// ```rust,ignore +/// #[cfg(test)] +/// mod benchmarking { +/// use super::*; +/// use crate::tests::{new_test_ext, Test}; +/// use frame_support::assert_ok; +/// +/// #[test] +/// fn bench_accumulate_dummy() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_accumulate_dummy::()); +/// } +/// } +/// +/// #[test] +/// fn bench_set_dummy() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_set_dummy::()); +/// } +/// } +/// +/// #[test] +/// fn bench_sort_vector() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_sort_vector::()); +/// } +/// } +/// } +/// ``` +/// /// ## Arguments /// /// The first argument, `module`, must be the path to this crate's module. @@ -1092,16 +1355,50 @@ macro_rules! impl_benchmark_test { // just iterate over the `Benchmarking::benchmarks` list to run the actual implementations. #[macro_export] macro_rules! impl_benchmark_test_suite { + ( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $rest:tt )* )? + ) => { + $crate::impl_test_function!( + () + () + () + $bench_module, + $new_test_ext, + $test + $(, $( $rest )* )? + ); + } +} + +// Takes all arguments from `impl_benchmark_test_suite` and three additional arguments. +// +// Can be configured to generate one #[test] fn per bench case or +// one #[test] fn for all bench cases. +// This depends on whether or not the first argument contains a non-empty list of bench names. +#[macro_export] +#[doc(hidden)] +macro_rules! 
impl_test_function { // user might or might not have set some keyword arguments; set the defaults // // The weird syntax indicates that `rest` comes only after a comma, which is otherwise optional ( + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + $bench_module:ident, $new_test_ext:expr, $test:path $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1115,6 +1412,10 @@ macro_rules! impl_benchmark_test_suite { }; // pick off the benchmarks_path keyword argument ( + @cases: + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) @selected: $bench_module:ident, $new_test_ext:expr, @@ -1126,7 +1427,11 @@ macro_rules! impl_benchmark_test_suite { benchmarks_path = $benchmarks_path:ident $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1140,6 +1445,10 @@ macro_rules! impl_benchmark_test_suite { }; // pick off the extra keyword argument ( + @cases: + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) @selected: $bench_module:ident, $new_test_ext:expr, @@ -1151,7 +1460,11 @@ macro_rules! impl_benchmark_test_suite { extra = $extra:expr $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1165,6 +1478,10 @@ macro_rules! impl_benchmark_test_suite { }; // pick off the exec_name keyword argument ( + @cases: + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) @selected: $bench_module:ident, $new_test_ext:expr, @@ -1176,7 +1493,11 @@ macro_rules! 
impl_benchmark_test_suite { exec_name = $exec_name:ident $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1188,8 +1509,34 @@ macro_rules! impl_benchmark_test_suite { $( $( $rest )* )? ); }; - // all options set; nothing else in user-provided keyword arguments + // iteration-exit arm which generates a #[test] function for each case. + ( + @cases: + ( $( $names:tt )+ ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $path_to_benchmarks_invocation:ident, + extra = $extra:expr, + exec_name = $exec_name:ident, + @user: + $(,)? + ) => { + $crate::impl_bench_case_tests!( + { $bench_module, $new_test_ext, $exec_name, $test, $extra } + { $( $names_extra:tt )* } + $($names)+ + ); + }; + // iteration-exit arm which generates one #[test] function for all cases. ( + @cases: + () + () + () @selected: $bench_module:ident, $new_test_ext:expr, diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index caccebd39c70b..0ad156ce5a88d 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -173,11 +173,11 @@ mod benchmarks { } verify { ensure!(m[0] == 0, "You forgot to sort!") } - } - crate::impl_benchmark_test_suite!( - Pallet, - crate::tests_instance::new_test_ext(), - crate::tests_instance::Test - ); + impl_benchmark_test_suite!( + Pallet, + crate::tests_instance::new_test_ext(), + crate::tests_instance::Test + ) + } } diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 158f5c5b57573..c24ad2f64e18d 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -16,7 +16,6 @@ // limitations under the License. //! 
Interfaces, types and utils for benchmarking a FRAME runtime. - use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchError, DispatchErrorWithPostInfo}, diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index 3bb184d5b3393..93a7ababb2ebd 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -22,24 +22,27 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../pr frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../treasury" } - +sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } -sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ "codec/std", + "sp-core/std", + "sp-io/std", "scale-info/std", "sp-std/std", "sp-runtime/std", "frame-support/std", "frame-system/std", "pallet-treasury/std", + "log/std", ] runtime-benchmarks = [ "frame-benchmarking", diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 798d929d241f7..33af02fbb9ea0 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -21,12 +21,11 @@ use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; -use frame_support::traits::OnInitialize; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use 
frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Module as Bounties; +use crate::Pallet as Bounties; use pallet_treasury::Pallet as Treasury; const SEED: u32 = 0; @@ -36,10 +35,10 @@ fn create_approved_bounties(n: u32) -> Result<(), &'static str> { for i in 0..n { let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; } - ensure!(BountyApprovals::get().len() == n as usize, "Not all bounty approved"); + ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); Ok(()) } @@ -64,7 +63,7 @@ fn create_bounty( let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; @@ -94,7 +93,7 @@ benchmarks! { approve_bounty { let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; }: _(RawOrigin::Root, bounty_id) propose_curator { @@ -102,7 +101,7 @@ benchmarks! 
{ let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; Bounties::::on_initialize(T::BlockNumber::zero()); }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) @@ -112,7 +111,7 @@ benchmarks! { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; Bounties::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), bounty_id) @@ -122,7 +121,7 @@ benchmarks! { let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; Bounties::::on_initialize(T::BlockNumber::zero()); Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; @@ -133,7 +132,7 @@ benchmarks! { let (curator_lookup, bounty_id) = create_bounty::()?; Bounties::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); @@ -144,10 +143,9 @@ benchmarks! 
{ let (curator_lookup, bounty_id) = create_bounty::()?; Bounties::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; - let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; @@ -164,17 +162,17 @@ benchmarks! { setup_pot_account::(); let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; }: close_bounty(RawOrigin::Root, bounty_id) close_bounty_active { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; Bounties::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; }: close_bounty(RawOrigin::Root, bounty_id) verify { - assert_last_event::(RawEvent::BountyCanceled(bounty_id).into()) + assert_last_event::(Event::BountyCanceled(bounty_id).into()) } extend_bounty_expiry { @@ -182,11 +180,11 @@ benchmarks! { let (curator_lookup, bounty_id) = create_bounty::()?; Bounties::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::get() - 1; + let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) verify { - assert_last_event::(RawEvent::BountyExtended(bounty_id).into()) + assert_last_event::(Event::BountyExtended(bounty_id).into()) } spend_funds { @@ -209,8 +207,8 @@ benchmarks! 
{ verify { ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); ensure!(missed_any == false, "Missed some"); - assert_last_event::(RawEvent::BountyBecameActive(b - 1).into()) + assert_last_event::(Event::BountyBecameActive(b - 1).into()) } -} -impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test) +} diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index 77a8e47174019..69380502bad3f 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -75,13 +75,12 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +pub mod migrations; mod tests; pub mod weights; use sp_std::prelude::*; -use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure}; - use frame_support::traits::{ Currency, ExistenceRequirement::AllowDeath, Get, Imbalance, OnUnbalanced, ReservableCurrency, }; @@ -93,46 +92,17 @@ use sp_runtime::{ use frame_support::{dispatch::DispatchResultWithPostInfo, traits::EnsureOrigin}; -use frame_support::weights::Weight; - -use codec::{Decode, Encode}; -use frame_system::{self as system, ensure_signed}; +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; use scale_info::TypeInfo; pub use weights::WeightInfo; +pub use pallet::*; + type BalanceOf = pallet_treasury::BalanceOf; type PositiveImbalanceOf = pallet_treasury::PositiveImbalanceOf; -pub trait Config: frame_system::Config + pallet_treasury::Config { - /// The amount held on deposit for placing a bounty proposal. - type BountyDepositBase: Get>; - - /// The delay period for which a bounty beneficiary need to wait before claim the payout. - type BountyDepositPayoutDelay: Get; - - /// Bounty duration in blocks. - type BountyUpdatePeriod: Get; - - /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. 
- type BountyCuratorDeposit: Get; - - /// Minimum value for a bounty. - type BountyValueMinimum: Get>; - - /// The amount held on deposit per byte within the tip report reason or bounty description. - type DataDepositPerByte: Get>; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// Maximum acceptable reason length. - type MaximumReasonLength: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} - /// An index of a bounty. Just a `u32`. pub type BountyIndex = u32; @@ -186,55 +156,54 @@ pub enum BountyStatus { }, } -// Note :: For backward compatibility reasons, -// pallet-bounties uses Treasury for storage. -// This is temporary solution, soon will get replaced with -// Own storage identifier. -decl_storage! { - trait Store for Module as Treasury { +#[frame_support::pallet] +pub mod pallet { + use super::*; - /// Number of bounty proposals that have been made. - pub BountyCount get(fn bounty_count): BountyIndex; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// Bounties that have been made. - pub Bounties get(fn bounties): - map hasher(twox_64_concat) BountyIndex - => Option, T::BlockNumber>>; + #[pallet::config] + pub trait Config: frame_system::Config + pallet_treasury::Config { + /// The amount held on deposit for placing a bounty proposal. + #[pallet::constant] + type BountyDepositBase: Get>; - /// The description of each bounty. - pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + #[pallet::constant] + type BountyDepositPayoutDelay: Get; - /// Bounty indices that have been approved but not yet funded. - pub BountyApprovals get(fn bounty_approvals): Vec; - } -} + /// Bounty duration in blocks. 
+ #[pallet::constant] + type BountyUpdatePeriod: Get; -decl_event!( - pub enum Event - where - Balance = BalanceOf, - ::AccountId, - { - /// New bounty proposal. \[index\] - BountyProposed(BountyIndex), - /// A bounty proposal was rejected; funds were slashed. \[index, bond\] - BountyRejected(BountyIndex, Balance), - /// A bounty proposal is funded and became active. \[index\] - BountyBecameActive(BountyIndex), - /// A bounty is awarded to a beneficiary. \[index, beneficiary\] - BountyAwarded(BountyIndex, AccountId), - /// A bounty is claimed by beneficiary. \[index, payout, beneficiary\] - BountyClaimed(BountyIndex, Balance, AccountId), - /// A bounty is cancelled. \[index\] - BountyCanceled(BountyIndex), - /// A bounty expiry is extended. \[index\] - BountyExtended(BountyIndex), + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty + /// curator. + #[pallet::constant] + type BountyCuratorDeposit: Get; + + /// Minimum value for a bounty. + #[pallet::constant] + type BountyValueMinimum: Get>; + + /// The amount held on deposit per byte within the tip report reason or bounty description. + #[pallet::constant] + type DataDepositPerByte: Get>; + + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// Maximum acceptable reason length. + #[pallet::constant] + type MaximumReasonLength: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } -); -decl_error! { - /// Error for the treasury module. - pub enum Error for Module { + #[pallet::error] + pub enum Error { /// Proposer's balance is too low. InsufficientProposersBalance, /// No proposal or bounty at that index. @@ -255,38 +224,53 @@ decl_error! { /// The bounties cannot be claimed/closed because it's still in the countdown period. Premature, } -} - -decl_module! { - pub struct Module - for enum Call - where origin: T::Origin - { - /// The amount held on deposit per byte within bounty description. 
- const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); - - /// The amount held on deposit for placing a bounty proposal. - const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); - - /// The delay period for which a bounty beneficiary need to wait before claim the payout. - const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); - /// Bounty duration in blocks. - const BountyUpdatePeriod: T::BlockNumber = T::BountyUpdatePeriod::get(); - - /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. - const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); - - /// Minimum value for a bounty. - const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); - - /// Maximum acceptable reason length. - const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); - - type Error = Error; - - fn deposit_event() = default; + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// New bounty proposal. \[index\] + BountyProposed(BountyIndex), + /// A bounty proposal was rejected; funds were slashed. \[index, bond\] + BountyRejected(BountyIndex, BalanceOf), + /// A bounty proposal is funded and became active. \[index\] + BountyBecameActive(BountyIndex), + /// A bounty is awarded to a beneficiary. \[index, beneficiary\] + BountyAwarded(BountyIndex, T::AccountId), + /// A bounty is claimed by beneficiary. \[index, payout, beneficiary\] + BountyClaimed(BountyIndex, BalanceOf, T::AccountId), + /// A bounty is cancelled. \[index\] + BountyCanceled(BountyIndex), + /// A bounty expiry is extended. \[index\] + BountyExtended(BountyIndex), + } + /// Number of bounty proposals that have been made. + #[pallet::storage] + #[pallet::getter(fn bounty_count)] + pub type BountyCount = StorageValue<_, BountyIndex, ValueQuery>; + + /// Bounties that have been made. 
+ #[pallet::storage] + #[pallet::getter(fn bounties)] + pub type Bounties = StorageMap< + _, + Twox64Concat, + BountyIndex, + Bounty, T::BlockNumber>, + >; + + /// The description of each bounty. + #[pallet::storage] + #[pallet::getter(fn bounty_descriptions)] + pub type BountyDescriptions = StorageMap<_, Twox64Concat, BountyIndex, Vec>; + + /// Bounty indices that have been approved but not yet funded. + #[pallet::storage] + #[pallet::getter(fn bounty_approvals)] + pub type BountyApprovals = StorageValue<_, Vec, ValueQuery>; + + #[pallet::call] + impl Pallet { /// Propose a new bounty. /// /// The dispatch origin for this call must be _Signed_. @@ -299,14 +283,15 @@ decl_module! { /// - `fee`: The curator fee. /// - `value`: The total payment amount of this bounty, curator fee included. /// - `description`: The description of this bounty. - #[weight = ::WeightInfo::propose_bounty(description.len() as u32)] - fn propose_bounty( - origin, - #[compact] value: BalanceOf, + #[pallet::weight(::WeightInfo::propose_bounty(description.len() as u32))] + pub fn propose_bounty( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, description: Vec, - ) { + ) -> DispatchResult { let proposer = ensure_signed(origin)?; Self::create_bounty(proposer, description, value)?; + Ok(()) } /// Approve a bounty proposal. At a later time, the bounty will be funded and become active @@ -317,8 +302,11 @@ decl_module! { /// # /// - O(1). /// # - #[weight = ::WeightInfo::approve_bounty()] - fn approve_bounty(origin, #[compact] bounty_id: BountyIndex) { + #[pallet::weight(::WeightInfo::approve_bounty())] + pub fn approve_bounty( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + ) -> DispatchResult { T::ApproveOrigin::ensure_origin(origin)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { @@ -327,10 +315,11 @@ decl_module! 
{ bounty.status = BountyStatus::Approved; - BountyApprovals::append(bounty_id); + BountyApprovals::::append(bounty_id); Ok(()) })?; + Ok(()) } /// Assign a curator to a funded bounty. @@ -340,18 +329,17 @@ decl_module! { /// # /// - O(1). /// # - #[weight = ::WeightInfo::propose_curator()] - fn propose_curator( - origin, - #[compact] bounty_id: BountyIndex, + #[pallet::weight(::WeightInfo::propose_curator())] + pub fn propose_curator( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, curator: ::Source, - #[compact] fee: BalanceOf, - ) { + #[pallet::compact] fee: BalanceOf, + ) -> DispatchResult { T::ApproveOrigin::ensure_origin(origin)?; let curator = T::Lookup::lookup(curator)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; match bounty.status { BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {}, @@ -365,14 +353,15 @@ decl_module! { Ok(()) })?; + Ok(()) } /// Unassign curator from a bounty. /// /// This function can only be called by the `RejectOrigin` a signed origin. /// - /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious - /// or inactive. As a result, we will slash the curator when possible. + /// If this function is called by the `RejectOrigin`, we assume that the curator is + /// malicious or inactive. As a result, we will slash the curator when possible. /// /// If the origin is the curator, we take this as a sign they are unable to do their job and /// they willingly give up. We could slash them, but for now we allow them to recover their @@ -385,11 +374,11 @@ decl_module! { /// # /// - O(1). 
/// # - #[weight = ::WeightInfo::unassign_curator()] - fn unassign_curator( - origin, - #[compact] bounty_id: BountyIndex, - ) { + #[pallet::weight(::WeightInfo::unassign_curator())] + pub fn unassign_curator( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + ) -> DispatchResult { let maybe_sender = ensure_signed(origin.clone()) .map(Some) .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; @@ -407,7 +396,7 @@ decl_module! { BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { // No curator to unassign at this point. return Err(Error::::UnexpectedStatus.into()) - } + }, BountyStatus::CuratorProposed { ref curator } => { // A curator has been proposed, but not accepted yet. // Either `RejectOrigin` or the proposed curator can unassign the curator. @@ -425,10 +414,10 @@ decl_module! { // If the sender is not the curator, and the curator is inactive, // slash the curator. if sender != *curator { - let block_number = system::Pallet::::block_number(); + let block_number = frame_system::Pallet::::block_number(); if *update_due < block_number { slash_curator(curator, &mut bounty.curator_deposit); - // Continue to change bounty status below... + // Continue to change bounty status below... } else { // Curator has more time to give an update. return Err(Error::::Premature.into()) @@ -436,7 +425,8 @@ decl_module! { } else { // Else this is the curator, willingly giving up their role. // Give back their deposit. - let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); + let err_amount = + T::Currency::unreserve(&curator, bounty.curator_deposit); debug_assert!(err_amount.is_zero()); // Continue to change bounty status below... } @@ -450,12 +440,13 @@ decl_module! { ensure!(maybe_sender.is_none(), BadOrigin); slash_curator(curator, &mut bounty.curator_deposit); // Continue to change bounty status below... 
- } + }, }; bounty.status = BountyStatus::Funded; Ok(()) })?; + Ok(()) } /// Accept the curator role for a bounty. @@ -466,8 +457,11 @@ decl_module! { /// # /// - O(1). /// # - #[weight = ::WeightInfo::accept_curator()] - fn accept_curator(origin, #[compact] bounty_id: BountyIndex) { + #[pallet::weight(::WeightInfo::accept_curator())] + pub fn accept_curator( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + ) -> DispatchResult { let signer = ensure_signed(origin)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { @@ -481,17 +475,21 @@ decl_module! { T::Currency::reserve(curator, deposit)?; bounty.curator_deposit = deposit; - let update_due = system::Pallet::::block_number() + T::BountyUpdatePeriod::get(); - bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; + let update_due = frame_system::Pallet::::block_number() + + T::BountyUpdatePeriod::get(); + bounty.status = + BountyStatus::Active { curator: curator.clone(), update_due }; Ok(()) }, _ => Err(Error::::UnexpectedStatus.into()), } })?; + Ok(()) } - /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay. + /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds + /// after a delay. /// /// The dispatch origin for this call must be the curator of this bounty. /// @@ -501,18 +499,19 @@ decl_module! { /// # /// - O(1). 
/// # - #[weight = ::WeightInfo::award_bounty()] - fn award_bounty(origin, #[compact] bounty_id: BountyIndex, beneficiary: ::Source) { + #[pallet::weight(::WeightInfo::award_bounty())] + pub fn award_bounty( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + beneficiary: ::Source, + ) -> DispatchResult { let signer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; match &bounty.status { - BountyStatus::Active { - curator, - .. - } => { + BountyStatus::Active { curator, .. } => { ensure!(signer == *curator, Error::::RequireCurator); }, _ => return Err(Error::::UnexpectedStatus.into()), @@ -520,13 +519,15 @@ decl_module! { bounty.status = BountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: system::Pallet::::block_number() + T::BountyDepositPayoutDelay::get(), + unlock_at: frame_system::Pallet::::block_number() + + T::BountyDepositPayoutDelay::get(), }; Ok(()) })?; Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); + Ok(()) } /// Claim the payout from an awarded bounty after payout delay. @@ -538,14 +539,22 @@ decl_module! { /// # /// - O(1). 
/// # - #[weight = ::WeightInfo::claim_bounty()] - fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { + #[pallet::weight(::WeightInfo::claim_bounty())] + pub fn claim_bounty( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + ) -> DispatchResult { let _ = ensure_signed(origin)?; // anyone can trigger claim Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; - if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { - ensure!(system::Pallet::::block_number() >= unlock_at, Error::::Premature); + if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = + bounty.status + { + ensure!( + frame_system::Pallet::::block_number() >= unlock_at, + Error::::Premature + ); let bounty_account = Self::bounty_account_id(bounty_id); let balance = T::Currency::free_balance(&bounty_account); let fee = bounty.fee.min(balance); // just to be safe @@ -554,12 +563,13 @@ decl_module! { debug_assert!(err_amount.is_zero()); let res = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail debug_assert!(res.is_ok()); - let res = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail + let res = + T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail debug_assert!(res.is_ok()); *maybe_bounty = None; - BountyDescriptions::remove(bounty_id); + BountyDescriptions::::remove(bounty_id); Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); Ok(()) @@ -567,6 +577,7 @@ decl_module! { Err(Error::::UnexpectedStatus.into()) } })?; + Ok(()) } /// Cancel a proposed or active bounty. All the funds will be sent to treasury and @@ -579,62 +590,76 @@ decl_module! { /// # /// - O(1). 
/// # - #[weight = ::WeightInfo::close_bounty_proposed().max(::WeightInfo::close_bounty_active())] - fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { + #[pallet::weight(::WeightInfo::close_bounty_proposed() + .max(::WeightInfo::close_bounty_active()))] + pub fn close_bounty( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + ) -> DispatchResultWithPostInfo { T::RejectOrigin::ensure_origin(origin)?; - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { - let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; - - match &bounty.status { - BountyStatus::Proposed => { - // The reject origin would like to cancel a proposed bounty. - BountyDescriptions::remove(bounty_id); - let value = bounty.bond; - let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; - T::OnSlash::on_unbalanced(imbalance); - *maybe_bounty = None; - - Self::deposit_event(Event::::BountyRejected(bounty_id, value)); - // Return early, nothing else to do. - return Ok(Some(::WeightInfo::close_bounty_proposed()).into()) - }, - BountyStatus::Approved => { - // For weight reasons, we don't allow a council to cancel in this phase. - // We ask for them to wait until it is funded before they can cancel. - return Err(Error::::UnexpectedStatus.into()) - }, - BountyStatus::Funded | - BountyStatus::CuratorProposed { .. } => { - // Nothing extra to do besides the removal of the bounty below. - }, - BountyStatus::Active { curator, .. } => { - // Cancelled by council, refund deposit of the working curator. - let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); - debug_assert!(err_amount.is_zero()); - // Then execute removal of the bounty below. - }, - BountyStatus::PendingPayout { .. } => { - // Bounty is already pending payout. If council wants to cancel - // this bounty, it should mean the curator was acting maliciously. 
- // So the council should first unassign the curator, slashing their - // deposit. - return Err(Error::::PendingPayout.into()) + Bounties::::try_mutate_exists( + bounty_id, + |maybe_bounty| -> DispatchResultWithPostInfo { + let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; + + match &bounty.status { + BountyStatus::Proposed => { + // The reject origin would like to cancel a proposed bounty. + BountyDescriptions::::remove(bounty_id); + let value = bounty.bond; + let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; + T::OnSlash::on_unbalanced(imbalance); + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyRejected(bounty_id, value)); + // Return early, nothing else to do. + return Ok( + Some(::WeightInfo::close_bounty_proposed()).into() + ) + }, + BountyStatus::Approved => { + // For weight reasons, we don't allow a council to cancel in this phase. + // We ask for them to wait until it is funded before they can cancel. + return Err(Error::::UnexpectedStatus.into()) + }, + BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => { + // Nothing extra to do besides the removal of the bounty below. + }, + BountyStatus::Active { curator, .. } => { + // Cancelled by council, refund deposit of the working curator. + let err_amount = + T::Currency::unreserve(&curator, bounty.curator_deposit); + debug_assert!(err_amount.is_zero()); + // Then execute removal of the bounty below. + }, + BountyStatus::PendingPayout { .. } => { + // Bounty is already pending payout. If council wants to cancel + // this bounty, it should mean the curator was acting maliciously. + // So the council should first unassign the curator, slashing their + // deposit. 
+ return Err(Error::::PendingPayout.into()) + }, } - } - let bounty_account = Self::bounty_account_id(bounty_id); + let bounty_account = Self::bounty_account_id(bounty_id); - BountyDescriptions::remove(bounty_id); + BountyDescriptions::::remove(bounty_id); - let balance = T::Currency::free_balance(&bounty_account); - let res = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail - debug_assert!(res.is_ok()); - *maybe_bounty = None; + let balance = T::Currency::free_balance(&bounty_account); + let res = T::Currency::transfer( + &bounty_account, + &Self::account_id(), + balance, + AllowDeath, + ); // should not fail + debug_assert!(res.is_ok()); + *maybe_bounty = None; - Self::deposit_event(Event::::BountyCanceled(bounty_id)); - Ok(Some(::WeightInfo::close_bounty_active()).into()) - }) + Self::deposit_event(Event::::BountyCanceled(bounty_id)); + Ok(Some(::WeightInfo::close_bounty_active()).into()) + }, + ) } /// Extend the expiry time of an active bounty. @@ -647,8 +672,12 @@ decl_module! { /// # /// - O(1). /// # - #[weight = ::WeightInfo::extend_bounty_expiry()] - fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { + #[pallet::weight(::WeightInfo::extend_bounty_expiry())] + pub fn extend_bounty_expiry( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + _remark: Vec, + ) -> DispatchResult { let signer = ensure_signed(origin)?; Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { @@ -657,7 +686,9 @@ decl_module! { match bounty.status { BountyStatus::Active { ref curator, ref mut update_due } => { ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (system::Pallet::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); + *update_due = (frame_system::Pallet::::block_number() + + T::BountyUpdatePeriod::get()) + .max(*update_due); }, _ => return Err(Error::::UnexpectedStatus.into()), } @@ -666,11 +697,12 @@ decl_module! 
{ })?; Self::deposit_event(Event::::BountyExtended(bounty_id)); + Ok(()) } } } -impl Module { +impl Pallet { // Add public immutables and private mutables. /// The account ID of the treasury pot. @@ -707,7 +739,7 @@ impl Module { T::Currency::reserve(&proposer, bond) .map_err(|_| Error::::InsufficientProposersBalance)?; - BountyCount::put(index + 1); + BountyCount::::put(index + 1); let bounty = Bounty { proposer, @@ -719,22 +751,22 @@ impl Module { }; Bounties::::insert(index, &bounty); - BountyDescriptions::insert(index, description); + BountyDescriptions::::insert(index, description); - Self::deposit_event(RawEvent::BountyProposed(index)); + Self::deposit_event(Event::::BountyProposed(index)); Ok(()) } } -impl pallet_treasury::SpendFunds for Module { +impl pallet_treasury::SpendFunds for Pallet { fn spend_funds( budget_remaining: &mut BalanceOf, imbalance: &mut PositiveImbalanceOf, total_weight: &mut Weight, missed_any: &mut bool, ) { - let bounties_len = BountyApprovals::mutate(|v| { + let bounties_len = BountyApprovals::::mutate(|v| { let bounties_approval_len = v.len() as u32; v.retain(|&index| { Bounties::::mutate(index, |bounty| { @@ -755,7 +787,7 @@ impl pallet_treasury::SpendFunds for Module { bounty.value, )); - Self::deposit_event(RawEvent::BountyBecameActive(index)); + Self::deposit_event(Event::::BountyBecameActive(index)); false } else { *missed_any = true; diff --git a/frame/bounties/src/migrations/mod.rs b/frame/bounties/src/migrations/mod.rs new file mode 100644 index 0000000000000..26d07a0cd5ac8 --- /dev/null +++ b/frame/bounties/src/migrations/mod.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Version 4. +pub mod v4; diff --git a/frame/bounties/src/migrations/v4.rs b/frame/bounties/src/migrations/v4.rs new file mode 100644 index 0000000000000..a1ca0e47680b0 --- /dev/null +++ b/frame/bounties/src/migrations/v4.rs @@ -0,0 +1,230 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{ + storage::{generator::StorageValue, StoragePrefixedMap}, + traits::{ + Get, GetStorageVersion, PalletInfoAccess, StorageVersion, + STORAGE_VERSION_STORAGE_KEY_POSTFIX, + }, + weights::Weight, +}; +use sp_core::hexdisplay::HexDisplay; +use sp_io::{hashing::twox_128, storage}; +use sp_std::str; + +use crate as pallet_bounties; + +/// Migrate the storage of the bounties pallet to a new prefix, leaving all other storage untouched +/// +/// This new prefix must be the same as the one set in construct_runtime. For safety, use +/// `PalletInfo` to get it, as: +/// `::PalletInfo::name::`. 
+/// +/// The migration will look into the storage version in order not to trigger a migration on an up +/// to date storage. Thus the on chain storage version must be less than 4 in order to trigger the +/// migration. +pub fn migrate< + T: pallet_bounties::Config, + P: GetStorageVersion + PalletInfoAccess, + N: AsRef, +>( + old_pallet_name: N, + new_pallet_name: N, +) -> Weight { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = new_pallet_name.as_ref(); + + if new_pallet_name == old_pallet_name { + log::info!( + target: "runtime::bounties", + "New pallet name is equal to the old prefix. No migration needs to be done.", + ); + return 0 + } + + let on_chain_storage_version =

::on_chain_storage_version(); + log::info!( + target: "runtime::bounties", + "Running migration to v4 for bounties with storage version {:?}", + on_chain_storage_version, + ); + + if on_chain_storage_version < 4 { + let storage_prefix = pallet_bounties::BountyCount::::storage_prefix(); + frame_support::storage::migration::move_storage_from_pallet( + storage_prefix, + old_pallet_name.as_bytes(), + new_pallet_name.as_bytes(), + ); + log_migration("migration", storage_prefix, old_pallet_name, new_pallet_name); + + let storage_prefix = pallet_bounties::Bounties::::storage_prefix(); + frame_support::storage::migration::move_storage_from_pallet( + storage_prefix, + old_pallet_name.as_bytes(), + new_pallet_name.as_bytes(), + ); + log_migration("migration", storage_prefix, old_pallet_name, new_pallet_name); + + let storage_prefix = pallet_bounties::BountyDescriptions::::storage_prefix(); + frame_support::storage::migration::move_storage_from_pallet( + storage_prefix, + old_pallet_name.as_bytes(), + new_pallet_name.as_bytes(), + ); + log_migration("migration", storage_prefix, old_pallet_name, new_pallet_name); + + let storage_prefix = pallet_bounties::BountyApprovals::::storage_prefix(); + frame_support::storage::migration::move_storage_from_pallet( + storage_prefix, + old_pallet_name.as_bytes(), + new_pallet_name.as_bytes(), + ); + log_migration("migration", storage_prefix, old_pallet_name, new_pallet_name); + + StorageVersion::new(4).put::

(); + ::BlockWeights::get().max_block + } else { + log::warn!( + target: "runtime::bounties", + "Attempted to apply migration to v4 but failed because storage version is {:?}", + on_chain_storage_version, + ); + 0 + } +} + +/// Some checks prior to migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn pre_migration>( + old_pallet_name: N, + new_pallet_name: N, +) { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = new_pallet_name.as_ref(); + let storage_prefix_bounties_count = pallet_bounties::BountyCount::::storage_prefix(); + let storage_prefix_bounties = pallet_bounties::Bounties::::storage_prefix(); + let storage_prefix_bounties_description = + pallet_bounties::BountyDescriptions::::storage_prefix(); + let storage_prefix_bounties_approvals = pallet_bounties::BountyApprovals::::storage_prefix(); + log_migration("pre-migration", storage_prefix_bounties_count, old_pallet_name, new_pallet_name); + log_migration("pre-migration", storage_prefix_bounties, old_pallet_name, new_pallet_name); + log_migration( + "pre-migration", + storage_prefix_bounties_description, + old_pallet_name, + new_pallet_name, + ); + log_migration( + "pre-migration", + storage_prefix_bounties_approvals, + old_pallet_name, + new_pallet_name, + ); + + let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); + let storage_version_key = + [&new_pallet_prefix, &twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX)[..]].concat(); + + // ensure nothing is stored in the new prefix. 
+ assert!( + storage::next_key(&new_pallet_prefix).map_or( + // either nothing is there + true, + // or we ensure that the next key has no common prefix with twox_128(new), + // or is the pallet version that is already stored using the pallet name + |next_key| { + storage::next_key(&next_key).map_or(true, |next_key| { + !next_key.starts_with(&new_pallet_prefix) || next_key == storage_version_key + }) + }, + ), + "unexpected next_key({}) = {:?}", + new_pallet_name, + HexDisplay::from(&sp_io::storage::next_key(&new_pallet_prefix).unwrap()), + ); + assert!(

::on_chain_storage_version() < 4); +} + +/// Some checks for after migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn post_migration>( + old_pallet_name: N, + new_pallet_name: N, +) { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = new_pallet_name.as_ref(); + let storage_prefix_bounties_count = pallet_bounties::BountyCount::::storage_prefix(); + let storage_prefix_bounties = pallet_bounties::Bounties::::storage_prefix(); + let storage_prefix_bounties_description = + pallet_bounties::BountyDescriptions::::storage_prefix(); + let storage_prefix_bounties_approvals = pallet_bounties::BountyApprovals::::storage_prefix(); + log_migration( + "post-migration", + storage_prefix_bounties_count, + old_pallet_name, + new_pallet_name, + ); + log_migration("post-migration", storage_prefix_bounties, old_pallet_name, new_pallet_name); + log_migration( + "post-migration", + storage_prefix_bounties_description, + old_pallet_name, + new_pallet_name, + ); + log_migration( + "post-migration", + storage_prefix_bounties_approvals, + old_pallet_name, + new_pallet_name, + ); + + let old_pallet_prefix = twox_128(old_pallet_name.as_bytes()); + let old_bounties_count_key = + [&old_pallet_prefix, &twox_128(storage_prefix_bounties_count)[..]].concat(); + let old_bounties_key = [&old_pallet_prefix, &twox_128(storage_prefix_bounties)[..]].concat(); + let old_bounties_description_key = + [&old_pallet_prefix, &twox_128(storage_prefix_bounties_description)[..]].concat(); + let old_bounties_approvals_key = + [&old_pallet_prefix, &twox_128(storage_prefix_bounties_approvals)[..]].concat(); + assert!(storage::next_key(&old_bounties_count_key) + .map_or(true, |next_key| !next_key.starts_with(&old_bounties_count_key))); + assert!(storage::next_key(&old_bounties_key) + .map_or(true, |next_key| !next_key.starts_with(&old_bounties_key))); + 
assert!(storage::next_key(&old_bounties_description_key) + .map_or(true, |next_key| !next_key.starts_with(&old_bounties_description_key))); + assert!(storage::next_key(&old_bounties_approvals_key) + .map_or(true, |next_key| !next_key.starts_with(&old_bounties_approvals_key))); + + assert_eq!(

::on_chain_storage_version(), 4); +} + +fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) { + log::info!( + target: "runtime::bounties", + "{} prefix of storage '{}': '{}' ==> '{}'", + stage, + str::from_utf8(storage_prefix).unwrap_or(""), + old_pallet_name, + new_pallet_name, + ); +} diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index ff058a3601e07..96c09581fdd1e 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -32,9 +32,11 @@ use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BadOrigin, BlakeTwo256, IdentityLookup}, - Perbill, + Perbill, Storage, }; +use super::Event as BountiesEvent; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -160,7 +162,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { t.into() } -fn last_event() -> RawEvent { +fn last_event() -> BountiesEvent { System::events() .into_iter() .map(|r| r.event) @@ -396,7 +398,7 @@ fn propose_bounty_works() { assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); - assert_eq!(last_event(), RawEvent::BountyProposed(0)); + assert_eq!(last_event(), BountiesEvent::BountyProposed(0)); let deposit: u64 = 85 + 5; assert_eq!(Balances::reserved_balance(0), deposit); @@ -458,7 +460,7 @@ fn close_bounty_works() { let deposit: u64 = 80 + 5; - assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit)); + assert_eq!(last_event(), BountiesEvent::BountyRejected(0, deposit)); assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 100 - deposit); @@ -690,7 +692,7 @@ fn award_and_claim_bounty_works() { assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3)); + assert_eq!(last_event(), BountiesEvent::BountyClaimed(0, 56, 3)); assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 @@ -729,7 +731,7 @@ 
fn claim_handles_high_fee() { assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3)); + assert_eq!(last_event(), BountiesEvent::BountyClaimed(0, 0, 3)); assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 assert_eq!(Balances::free_balance(3), 0); @@ -806,7 +808,7 @@ fn award_and_cancel() { assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); assert_ok!(Bounties::close_bounty(Origin::root(), 0)); - assert_eq!(last_event(), RawEvent::BountyCanceled(0)); + assert_eq!(last_event(), BountiesEvent::BountyCanceled(0)); assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); @@ -934,6 +936,48 @@ fn extend_expiry() { }); } +#[test] +fn test_migration_v4() { + let mut s = Storage::default(); + + let index: u32 = 10; + + let bounty = Bounty:: { + proposer: 0, + value: 20, + fee: 20, + curator_deposit: 20, + bond: 50, + status: BountyStatus::::Proposed, + }; + + let data = vec![ + (pallet_bounties::BountyCount::::hashed_key().to_vec(), 10.encode().to_vec()), + (pallet_bounties::Bounties::::hashed_key_for(index), bounty.encode().to_vec()), + (pallet_bounties::BountyDescriptions::::hashed_key_for(index), vec![0, 0]), + ( + pallet_bounties::BountyApprovals::::hashed_key().to_vec(), + vec![10 as u32].encode().to_vec(), + ), + ]; + + s.top = data.into_iter().collect(); + + sp_io::TestExternalities::new(s).execute_with(|| { + use frame_support::traits::PalletInfo; + let old_pallet_name = ::PalletInfo::name::() + .expect("Bounties is part of runtime, so it has a name; qed"); + let new_pallet_name = "NewBounties"; + + crate::migrations::v4::pre_migration::(old_pallet_name, new_pallet_name); + crate::migrations::v4::migrate::(old_pallet_name, new_pallet_name); + crate::migrations::v4::post_migration::( + old_pallet_name, + new_pallet_name, + ); + }); +} + #[test] fn genesis_funding_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git 
a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index c7e695babf27d..c26a2b43f5b75 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -23,9 +23,7 @@ use crate::Pallet as Collective; use sp_runtime::traits::Bounded; use sp_std::mem::size_of; -use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelisted_caller, -}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; use frame_system::{Call as SystemCall, Pallet as System, RawOrigin as SystemOrigin}; const SEED: u32 = 0; @@ -638,6 +636,6 @@ benchmarks_instance_pallet! { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved(last_hash).into()); } -} -impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index db657e618322e..e382e616f27fd 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -36,7 +36,7 @@ use crate::{ Pallet as Contracts, *, }; use codec::Encode; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::weights::Weight; use frame_system::RawOrigin; use pwasm_utils::parity_wasm::elements::{BlockType, BrTableData, Instruction, ValueType}; @@ -2241,7 +2241,7 @@ benchmarks! { ); } #[cfg(not(feature = "std"))] - return Err("Run this bench with a native runtime in order to see the schedule.".into()); + Err("Run this bench with a native runtime in order to see the schedule.")?; }: {} // Execute one erc20 transfer using the ink! erc20 example contract. @@ -2325,10 +2325,10 @@ benchmarks! 
{ ) .result?; } -} -impl_benchmark_test_suite!( - Contracts, - crate::tests::ExtBuilder::default().build(), - crate::tests::Test, -); + impl_benchmark_test_suite!( + Contracts, + crate::tests::ExtBuilder::default().build(), + crate::tests::Test, + ) +} diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index cc468466c2922..7fa0b0b274449 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -660,7 +660,10 @@ where } // Deposit an instantiation event. - deposit_event::(vec![], Event::Instantiated(self.caller().clone(), account_id)); + deposit_event::( + vec![], + Event::Instantiated { deployer: self.caller().clone(), contract: account_id }, + ); } Ok(output) @@ -942,10 +945,10 @@ where )?; ContractInfoOf::::remove(&frame.account_id); E::remove_user(info.code_hash, &mut frame.nested_meter)?; - Contracts::::deposit_event(Event::Terminated( - frame.account_id.clone(), - beneficiary.clone(), - )); + Contracts::::deposit_event(Event::Terminated { + contract: frame.account_id.clone(), + beneficiary: beneficiary.clone(), + }); Ok(()) } @@ -997,7 +1000,7 @@ where fn deposit_event(&mut self, topics: Vec, data: Vec) { deposit_event::( topics, - Event::ContractEmitted(self.top_frame().account_id.clone(), data), + Event::ContractEmitted { contract: self.top_frame().account_id.clone(), data }, ); } @@ -1662,7 +1665,10 @@ mod tests { Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch ); - assert_eq!(&events(), &[Event::Instantiated(ALICE, instantiated_contract_address)]); + assert_eq!( + &events(), + &[Event::Instantiated { deployer: ALICE, contract: instantiated_contract_address }] + ); }); } @@ -1751,7 +1757,10 @@ mod tests { Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch ); - assert_eq!(&events(), &[Event::Instantiated(BOB, instantiated_contract_address)]); + assert_eq!( + &events(), + &[Event::Instantiated { deployer: BOB, contract: instantiated_contract_address }] + ); }); } diff --git 
a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 77efcc6986e64..62b74b9b7b954 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -104,7 +104,7 @@ pub use crate::{ schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, }; use crate::{ - exec::{Executable, Stack as ExecStack}, + exec::{AccountIdOf, ExecError, Executable, Stack as ExecStack}, gas::GasMeter, storage::{ContractInfo, DeletedContract, Storage}, wasm::PrefabWasmModule, @@ -112,13 +112,14 @@ use crate::{ }; use frame_support::{ dispatch::Dispatchable, + ensure, traits::{Contains, Currency, Get, Randomness, StorageVersion, Time}, weights::{GetDispatchInfo, PostDispatchInfo, Weight}, }; use frame_system::Pallet as System; use pallet_contracts_primitives::{ - Code, ContractAccessError, ContractExecResult, ContractInstantiateResult, GetStorageResult, - InstantiateReturnValue, + Code, ContractAccessError, ContractExecResult, ContractInstantiateResult, ExecReturnValue, + GetStorageResult, InstantiateReturnValue, }; use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; @@ -272,18 +273,8 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let result = ExecStack::>::run_call( - origin, - dest, - &mut gas_meter, - &schedule, - value, - data, - None, - ); - gas_meter.into_dispatch_result(result, T::WeightInfo::call()) + let output = Self::internal_call(origin, dest, value, gas_limit, data, None); + output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) } /// Instantiates a new contract from the supplied `code` optionally transferring @@ -325,26 +316,19 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let code_len = code.len() as u32; - ensure!(code_len <= T::Schedule::get().limits.code_len, 
Error::::CodeTooLarge); - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let executable = PrefabWasmModule::from_code(code, &schedule)?; - let code_len = executable.code_len(); - ensure!(code_len <= T::Schedule::get().limits.code_len, Error::::CodeTooLarge); - let result = ExecStack::>::run_instantiate( + let salt_len = salt.len() as u32; + let output = Self::internal_instantiate( origin, - executable, - &mut gas_meter, - &schedule, endowment, + gas_limit, + Code::Upload(Bytes(code)), data, - &salt, + salt, None, - ) - .map(|(_address, output)| output); - gas_meter.into_dispatch_result( - result, - T::WeightInfo::instantiate_with_code(code_len / 1024, salt.len() as u32 / 1024), + ); + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, result)| result), + T::WeightInfo::instantiate_with_code(code_len / 1024, salt_len / 1024), ) } @@ -365,71 +349,64 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let executable = PrefabWasmModule::from_storage(code_hash, &schedule, &mut gas_meter)?; - let result = ExecStack::>::run_instantiate( + let salt_len = salt.len() as u32; + let output = Self::internal_instantiate( origin, - executable, - &mut gas_meter, - &schedule, endowment, + gas_limit, + Code::Existing(code_hash), data, - &salt, + salt, None, + ); + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, output)| output), + T::WeightInfo::instantiate(salt_len / 1024), ) - .map(|(_address, output)| output); - gas_meter - .into_dispatch_result(result, T::WeightInfo::instantiate(salt.len() as u32 / 1024)) } } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Contract deployed by address at the specified address. 
\[deployer, contract\] - Instantiated(T::AccountId, T::AccountId), + /// Contract deployed by address at the specified address. + Instantiated { deployer: T::AccountId, contract: T::AccountId }, /// Contract has been removed. - /// \[contract, beneficiary\] - /// - /// # Params - /// - /// - `contract`: The contract that was terminated. - /// - `beneficiary`: The account that received the contracts remaining balance. /// /// # Note /// /// The only way for a contract to be removed and emitting this event is by calling /// `seal_terminate`. - Terminated(T::AccountId, T::AccountId), + Terminated { + /// The contract that was terminated. + contract: T::AccountId, + /// The account that received the contracts remaining balance + beneficiary: T::AccountId, + }, - /// Code with the specified hash has been stored. \[code_hash\] - CodeStored(T::Hash), + /// Code with the specified hash has been stored. + CodeStored { code_hash: T::Hash }, /// Triggered when the current schedule is updated. - /// \[version\] - /// - /// # Params - /// - /// - `version`: The version of the newly set schedule. - ScheduleUpdated(u32), + ScheduleUpdated { + /// The version of the newly set schedule. + version: u32, + }, /// A custom event emitted by the contract. - /// \[contract, data\] - /// - /// # Params - /// - /// - `contract`: The contract that emitted the event. - /// - `data`: Data supplied by the contract. Metadata generated during contract compilation - /// is needed to decode it. - ContractEmitted(T::AccountId, Vec), + ContractEmitted { + /// The contract that emitted the event. + contract: T::AccountId, + /// Data supplied by the contract. Metadata generated during contract compilation + /// is needed to decode it. + data: Vec, + }, /// A code with the specified hash was removed. - /// \[code_hash\] /// /// This happens when the last contract that uses this code hash was removed. 
- CodeRemoved(T::Hash), + CodeRemoved { code_hash: T::Hash }, } #[pallet::error] @@ -535,6 +512,20 @@ pub mod pallet { pub(crate) type DeletionQueue = StorageValue<_, Vec, ValueQuery>; } +/// Return type of the private [`Pallet::internal_call`] function. +type InternalCallOutput = InternalOutput; + +/// Return type of the private [`Pallet::internal_instantiate`] function. +type InternalInstantiateOutput = InternalOutput, ExecReturnValue)>; + +/// Return type of private helper functions. +struct InternalOutput { + /// The gas meter that was used to execute the call. + gas_meter: GasMeter, + /// The result of the call. + result: Result, +} + impl Pallet where T::AccountId: UncheckedFrom + AsRef<[u8]>, @@ -556,25 +547,16 @@ where dest: T::AccountId, value: BalanceOf, gas_limit: Weight, - input_data: Vec, + data: Vec, debug: bool, ) -> ContractExecResult { - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); let mut debug_message = if debug { Some(Vec::new()) } else { None }; - let result = ExecStack::>::run_call( - origin, - dest, - &mut gas_meter, - &schedule, - value, - input_data, - debug_message.as_mut(), - ); + let output = + Self::internal_call(origin, dest, value, gas_limit, data, debug_message.as_mut()); ContractExecResult { - result: result.map_err(|r| r.error), - gas_consumed: gas_meter.gas_consumed(), - gas_required: gas_meter.gas_required(), + result: output.result.map_err(|r| r.error), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), debug_message: debug_message.unwrap_or_default(), } } @@ -601,38 +583,23 @@ where salt: Vec, debug: bool, ) -> ContractInstantiateResult { - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let executable = match code { - Code::Upload(Bytes(binary)) => PrefabWasmModule::from_code(binary, &schedule), - Code::Existing(hash) => PrefabWasmModule::from_storage(hash, &schedule, &mut gas_meter), - }; - let executable 
= match executable { - Ok(executable) => executable, - Err(error) => - return ContractInstantiateResult { - result: Err(error.into()), - gas_consumed: gas_meter.gas_consumed(), - gas_required: gas_meter.gas_required(), - debug_message: Vec::new(), - }, - }; let mut debug_message = if debug { Some(Vec::new()) } else { None }; - let result = ExecStack::>::run_instantiate( + let output = Self::internal_instantiate( origin, - executable, - &mut gas_meter, - &schedule, endowment, + gas_limit, + code, data, - &salt, + salt, debug_message.as_mut(), - ) - .and_then(|(account_id, result)| Ok(InstantiateReturnValue { result, account_id })); + ); ContractInstantiateResult { - result: result.map_err(|e| e.error), - gas_consumed: gas_meter.gas_consumed(), - gas_required: gas_meter.gas_required(), + result: output + .result + .map(|(account_id, result)| InstantiateReturnValue { result, account_id }) + .map_err(|e| e.error), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), debug_message: debug_message.unwrap_or_default(), } } @@ -709,4 +676,74 @@ where ) -> frame_support::dispatch::DispatchResult { self::wasm::reinstrument(module, schedule) } + + /// Internal function that does the actual call. + /// + /// Called by dispatchables and public functions. + fn internal_call( + origin: T::AccountId, + dest: T::AccountId, + value: BalanceOf, + gas_limit: Weight, + data: Vec, + debug_message: Option<&mut Vec>, + ) -> InternalCallOutput { + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = T::Schedule::get(); + let result = ExecStack::>::run_call( + origin, + dest, + &mut gas_meter, + &schedule, + value, + data, + debug_message, + ); + InternalCallOutput { gas_meter, result } + } + + /// Internal function that does the actual instantiation. + /// + /// Called by dispatchables and public functions. 
+ fn internal_instantiate( + origin: T::AccountId, + endowment: BalanceOf, + gas_limit: Weight, + code: Code>, + data: Vec, + salt: Vec, + debug_message: Option<&mut Vec>, + ) -> InternalInstantiateOutput { + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = T::Schedule::get(); + let try_exec = || { + let executable = match code { + Code::Upload(Bytes(binary)) => { + ensure!( + binary.len() as u32 <= schedule.limits.code_len, + >::CodeTooLarge + ); + let executable = PrefabWasmModule::from_code(binary, &schedule)?; + ensure!( + executable.code_len() <= schedule.limits.code_len, + >::CodeTooLarge + ); + executable + }, + Code::Existing(hash) => + PrefabWasmModule::from_storage(hash, &schedule, &mut gas_meter)?, + }; + ExecStack::>::run_instantiate( + origin, + executable, + &mut gas_meter, + &schedule, + endowment, + data, + &salt, + debug_message, + ) + }; + InternalInstantiateOutput { result: try_exec(), gas_meter } + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index f5b95c192c42e..b2141ca18b0b1 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -478,20 +478,25 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeStored(code_hash.into())), + event: Event::Contracts(crate::Event::CodeStored { + code_hash: code_hash.into() + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::ContractEmitted( - addr.clone(), - vec![1, 2, 3, 4] - )), + event: Event::Contracts(crate::Event::ContractEmitted { + contract: addr.clone(), + data: vec![1, 2, 3, 4] + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Instantiated(ALICE, addr.clone())), + event: Event::Contracts(crate::Event::Instantiated { + deployer: ALICE, + contract: addr.clone() + }), topics: vec![], }, ] @@ -764,12 +769,15 @@ fn self_destruct_works() { 
}, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeRemoved(code_hash)), + event: Event::Contracts(crate::Event::CodeRemoved { code_hash }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Terminated(addr.clone(), DJANGO)), + event: Event::Contracts(crate::Event::Terminated { + contract: addr.clone(), + beneficiary: DJANGO + }), topics: vec![], }, ], diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 08a7449683ed6..afb68d4d81179 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -59,7 +59,7 @@ where Some(module) => increment_64(&mut module.refcount), None => { *existing = Some(prefab_module); - Contracts::::deposit_event(Event::CodeStored(code_hash)) + Contracts::::deposit_event(Event::CodeStored { code_hash }) }, }); } @@ -170,7 +170,7 @@ where T::AccountId: UncheckedFrom + AsRef<[u8]>, { >::remove(code_hash); - Contracts::::deposit_event(Event::CodeRemoved(code_hash)) + Contracts::::deposit_event(Event::CodeRemoved { code_hash }) } /// Increment the refcount panicking if it should ever overflow (which will not happen). 
diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 7d4d7aee140b9..34bcb0da301e6 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -19,9 +19,10 @@ use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelist_account}; +use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{ assert_noop, assert_ok, + codec::Decode, traits::{ schedule::DispatchTime, Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable, }, @@ -69,7 +70,7 @@ fn add_referendum(n: u32) -> Result { let referendum_index: ReferendumIndex = ReferendumCount::::get() - 1; T::Scheduler::schedule_named( (DEMOCRACY_ID, referendum_index).encode(), - DispatchTime::At(1u32.into()), + DispatchTime::At(2u32.into()), None, 63, frame_system::RawOrigin::Root.into(), @@ -194,9 +195,8 @@ benchmarks! { emergency_cancel { let origin = T::CancellationOrigin::successful_origin(); let referendum_index = add_referendum::(0)?; - let call = Call::::emergency_cancel { ref_index: referendum_index }; assert_ok!(Democracy::::referendum_status(referendum_index)); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, referendum_index) verify { // Referendum has been canceled assert_noop!( @@ -219,14 +219,11 @@ benchmarks! { assert_ok!( Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()) ); - + let origin = T::BlacklistOrigin::successful_origin(); // Add a referendum of our proposal. let referendum_index = add_referendum::(0)?; assert_ok!(Democracy::::referendum_status(referendum_index)); - - let call = Call::::blacklist { proposal_hash: hash, maybe_ref_index: Some(referendum_index) }; - let origin = T::BlacklistOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, hash, Some(referendum_index)) verify { // Referendum has been canceled assert_noop!( @@ -246,9 +243,7 @@ benchmarks! 
{ proposal_hash, (T::BlockNumber::zero(), vec![T::AccountId::default(); v as usize]) ); - - let call = Call::::external_propose { proposal_hash }; - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -257,8 +252,7 @@ benchmarks! { external_propose_majority { let origin = T::ExternalMajorityOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - let call = Call::::external_propose_majority { proposal_hash }; - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -267,8 +261,7 @@ benchmarks! { external_propose_default { let origin = T::ExternalDefaultOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - let call = Call::::external_propose_default { proposal_hash }; - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -283,13 +276,7 @@ benchmarks! { let origin_fast_track = T::FastTrackOrigin::successful_origin(); let voting_period = T::FastTrackVotingPeriod::get(); let delay = 0u32; - let call = Call::::fast_track { - proposal_hash, - voting_period: voting_period.into(), - delay: delay.into() - }; - - }: { call.dispatch_bypass_filter(origin_fast_track)? } + }: _(origin_fast_track, proposal_hash, voting_period.into(), delay.into()) verify { assert_eq!(Democracy::::referendum_count(), 1, "referendum not created") } @@ -310,10 +297,9 @@ benchmarks! { vetoers.sort(); Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); - let call = Call::::veto_external { proposal_hash }; let origin = T::VetoOrigin::successful_origin(); ensure!(NextExternal::::get().is_some(), "no external proposal"); - }: { call.dispatch_bypass_filter(origin)? 
} + }: _(origin, proposal_hash) verify { assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; @@ -436,7 +422,39 @@ benchmarks! { assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); - }: { Democracy::::on_initialize(0u32.into()) } + }: { Democracy::::on_initialize(1u32.into()) } + verify { + // All should be on going + for i in 0 .. r { + if let Some(value) = ReferendumInfoOf::::get(i) { + match value { + ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()), + ReferendumInfo::Ongoing(_) => (), + } + } + } + } + + on_initialize_base_with_launch_period { + let r in 1 .. MAX_REFERENDUMS; + + for i in 0..r { + add_referendum::(i)?; + } + + for (key, mut info) in ReferendumInfoOf::::iter() { + if let ReferendumInfo::Ongoing(ref mut status) = info { + status.end += 100u32.into(); + } + ReferendumInfoOf::::insert(key, info); + } + + assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); + assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); + + let block_number = T::LaunchPeriod::get(); + + }: { Democracy::::on_initialize(block_number) } verify { // All should be on going for i in 0 .. r { @@ -774,12 +792,20 @@ benchmarks! { Some(PreimageStatus::Available { .. 
}) => (), _ => return Err("preimage not available".into()) } + let origin = RawOrigin::Root.into(); + let call = Call::::enact_proposal { proposal_hash, index: 0 }.encode(); }: { assert_eq!( - Democracy::::enact_proposal(RawOrigin::Root.into(), proposal_hash, 0), + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(origin), Err(Error::::PreimageInvalid.into()) ); } -} -impl_benchmark_test_suite!(Democracy, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!( + Democracy, + crate::tests::new_test_ext(), + crate::tests::Test + ); +} diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 8bc6921c4f8ad..50b245006fa24 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -149,7 +149,7 @@ //! - `cancel_queued` - Cancels a proposal that is queued for enactment. //! - `clear_public_proposal` - Removes all public proposals. -#![recursion_limit = "128"] +#![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode, Input}; @@ -1726,7 +1726,8 @@ impl Pallet { /// /// /// # - /// If a referendum is launched or maturing, this will take full block weight. Otherwise: + /// If a referendum is launched or maturing, this will take full block weight if queue is not + /// empty. Otherwise: /// - Complexity: `O(R)` where `R` is the number of unbaked referenda. /// - Db reads: `LastTabledWasExternal`, `NextExternal`, `PublicProps`, `account`, /// `ReferendumCount`, `LowestUnbaked` @@ -1737,18 +1738,24 @@ impl Pallet { let max_block_weight = T::BlockWeights::get().max_block; let mut weight = 0; + let next = Self::lowest_unbaked(); + let last = Self::referendum_count(); + let r = last.saturating_sub(next); + // pick out another public referendum if it's time. if (now % T::LaunchPeriod::get()).is_zero() { - // Errors come from the queue being empty. 
we don't really care about that, and even if - // we did, there is nothing we can do here. - let _ = Self::launch_next(now); - weight = max_block_weight; + // Errors come from the queue being empty. If the queue is not empty, it will take + // full block weight. + if Self::launch_next(now).is_ok() { + weight = max_block_weight; + } else { + weight = + weight.saturating_add(T::WeightInfo::on_initialize_base_with_launch_period(r)); + } + } else { + weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); } - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); - let r = last.saturating_sub(next); - weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); // tally up votes for any expiring referenda. for (index, info) in Self::maturing_referenda_at_inner(now, next..last).into_iter() { let approved = Self::bake_referendum(now, index, info)?; diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index e3f22f4fc0ab3..638852d3c7e19 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-09-30, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -60,6 +60,7 @@ pub trait WeightInfo { fn cancel_referendum() -> Weight; fn cancel_queued(r: u32, ) -> Weight; fn on_initialize_base(r: u32, ) -> Weight; + fn on_initialize_base_with_launch_period(r: u32, ) -> Weight; fn delegate(r: u32, ) -> Weight; fn undelegate(r: u32, ) -> Weight; fn clear_public_proposals() -> Weight; @@ -80,15 +81,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (65_665_000 as Weight) + (67_388_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - (40_003_000 as Weight) - // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + (41_157_000 as Weight) + // Standard Error: 0 + .saturating_add((157_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -96,9 +97,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - (45_465_000 as Weight) + (46_406_000 as Weight) // Standard Error: 1_000 - .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((170_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -106,16 +107,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - (45_112_000 as Weight) + (46_071_000 as Weight) // Standard Error: 1_000 - .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) + 
.saturating_add((166_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (26_651_000 as Weight) + (27_699_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -126,45 +127,45 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - (77_737_000 as Weight) + (82_703_000 as Weight) // Standard Error: 4_000 - .saturating_add((512_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((500_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - (13_126_000 as Weight) + (13_747_000 as Weight) // Standard Error: 0 - .saturating_add((89_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((76_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (2_923_000 as Weight) + (3_070_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - (2_889_000 as Weight) + (3_080_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (27_598_000 as Weight) + 
(29_129_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - (28_416_000 as Weight) + (30_105_000 as Weight) // Standard Error: 0 - .saturating_add((132_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((104_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -172,36 +173,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal(p: u32, ) -> Weight { - (52_836_000 as Weight) - // Standard Error: 2_000 - .saturating_add((478_000 as Weight).saturating_mul(p as Weight)) + (55_228_000 as Weight) + // Standard Error: 1_000 + .saturating_add((457_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (16_891_000 as Weight) + (17_319_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - (30_504_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_480_000 as Weight).saturating_mul(r as Weight)) + (29_738_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - // Storage: Democracy LastTabledWasExternal (r:1 w:0) - // Storage: Democracy NextExternal (r:1 w:0) - // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy LowestUnbaked (r:1 w:0) // Storage: Democracy 
ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - (6_259_000 as Weight) + (2_165_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_577_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy LowestUnbaked (r:1 w:0) + // Storage: Democracy ReferendumCount (r:1 w:0) + // Storage: Democracy LastTabledWasExternal (r:1 w:0) + // Storage: Democracy NextExternal (r:1 w:0) + // Storage: Democracy PublicProps (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:1 w:0) + fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { + (9_396_000 as Weight) // Standard Error: 4_000 - .saturating_add((5_032_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_604_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } @@ -209,9 +220,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - (51_719_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_210_000 as Weight).saturating_mul(r as Weight)) + (57_783_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_623_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) @@ -220,9 +231,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - (23_203_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_206_000 as 
Weight).saturating_mul(r as Weight)) + (26_027_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_593_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -230,31 +241,31 @@ impl WeightInfo for SubstrateWeight { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (3_127_000 as Weight) + (2_780_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - (44_130_000 as Weight) + (46_416_000 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - (28_756_000 as Weight) + (29_735_000 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - (39_922_000 as Weight) + (41_276_000 as Weight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -262,9 +273,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> 
Weight { - (38_621_000 as Weight) + (40_348_000 as Weight) // Standard Error: 1_000 - .saturating_add((110_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((60_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -272,27 +283,27 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - (36_631_000 as Weight) + (37_475_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((151_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - (21_025_000 as Weight) + (19_970_000 as Weight) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - (20_628_000 as Weight) + (20_094_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((157_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -305,15 +316,15 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (65_665_000 as Weight) + (67_388_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) 
.saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - (40_003_000 as Weight) - // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + (41_157_000 as Weight) + // Standard Error: 0 + .saturating_add((157_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -321,9 +332,9 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - (45_465_000 as Weight) + (46_406_000 as Weight) // Standard Error: 1_000 - .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((170_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -331,16 +342,16 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - (45_112_000 as Weight) + (46_071_000 as Weight) // Standard Error: 1_000 - .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((166_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (26_651_000 as Weight) + (27_699_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -351,45 +362,45 @@ impl WeightInfo for () { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - (77_737_000 as Weight) + (82_703_000 as Weight) // Standard Error: 4_000 - 
.saturating_add((512_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((500_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - (13_126_000 as Weight) + (13_747_000 as Weight) // Standard Error: 0 - .saturating_add((89_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((76_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (2_923_000 as Weight) + (3_070_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - (2_889_000 as Weight) + (3_080_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (27_598_000 as Weight) + (29_129_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - (28_416_000 as Weight) + (30_105_000 as Weight) // Standard Error: 0 - .saturating_add((132_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((104_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -397,36 +408,46 @@ impl WeightInfo for () { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) 
fn cancel_proposal(p: u32, ) -> Weight { - (52_836_000 as Weight) - // Standard Error: 2_000 - .saturating_add((478_000 as Weight).saturating_mul(p as Weight)) + (55_228_000 as Weight) + // Standard Error: 1_000 + .saturating_add((457_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (16_891_000 as Weight) + (17_319_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - (30_504_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_480_000 as Weight).saturating_mul(r as Weight)) + (29_738_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } - // Storage: Democracy LastTabledWasExternal (r:1 w:0) - // Storage: Democracy NextExternal (r:1 w:0) - // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy LowestUnbaked (r:1 w:0) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - (6_259_000 as Weight) + (2_165_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_577_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy LowestUnbaked (r:1 w:0) + // Storage: Democracy ReferendumCount (r:1 w:0) + // Storage: Democracy LastTabledWasExternal (r:1 w:0) + // Storage: Democracy NextExternal (r:1 w:0) + // Storage: Democracy PublicProps (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:1 w:0) + 
fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { + (9_396_000 as Weight) // Standard Error: 4_000 - .saturating_add((5_032_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_604_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } @@ -434,9 +455,9 @@ impl WeightInfo for () { // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - (51_719_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_210_000 as Weight).saturating_mul(r as Weight)) + (57_783_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_623_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) @@ -445,9 +466,9 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - (23_203_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_206_000 as Weight).saturating_mul(r as Weight)) + (26_027_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_593_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -455,31 +476,31 @@ impl WeightInfo for () { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (3_127_000 as Weight) + (2_780_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - (44_130_000 as Weight) + (46_416_000 as Weight) // 
Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - (28_756_000 as Weight) + (29_735_000 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - (39_922_000 as Weight) + (41_276_000 as Weight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -487,9 +508,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> Weight { - (38_621_000 as Weight) + (40_348_000 as Weight) // Standard Error: 1_000 - .saturating_add((110_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((60_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -497,27 +518,27 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - (36_631_000 as Weight) + (37_475_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((151_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - (21_025_000 as Weight) + (19_970_000 as Weight) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - (20_628_000 as Weight) + (20_094_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((157_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index fb5adda52e166..9648b8e0f2465 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -19,7 +19,7 @@ use super::*; use crate::{unsigned::IndexAssignmentOf, Pallet as MultiPhase}; -use frame_benchmarking::{account, impl_benchmark_test_suite}; +use frame_benchmarking::account; use frame_support::{assert_ok, traits::Hooks}; use frame_system::RawOrigin; use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; @@ -243,10 +243,10 @@ frame_benchmarking::benchmarks! { } create_snapshot_internal { - // number of votes in snapshot. Fixed to maximum. - let v = T::BenchmarkingConfig::SNAPSHOT_MAXIMUM_VOTERS; - // number of targets in snapshot. Fixed to maximum. - let t = T::BenchmarkingConfig::MAXIMUM_TARGETS; + // number of votes in snapshot. 
+ let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. + let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; // we don't directly need the data-provider to be populated, but it is just easy to use it. set_up_data_provider::(v, t); @@ -350,25 +350,8 @@ frame_benchmarking::benchmarks! { assert!(>::queued_solution().is_none()); >::put(Phase::Unsigned((true, 1u32.into()))); - - // encode the most significant storage item that needs to be decoded in the dispatch. - let encoded_snapshot = >::snapshot().ok_or("missing snapshot")?.encode(); - let encoded_call = Call::::submit_unsigned { - raw_solution: Box::new(raw_solution.clone()), - witness - }.encode(); - }: { - assert_ok!( - >::submit_unsigned( - RawOrigin::None.into(), - Box::new(raw_solution), - witness, - ) - ); - let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) - .expect("decoding should not fail; qed."); - let _decoded_call = as Decode>::decode(&mut &*encoded_call).expect("decoding should not fail; qed."); - } verify { + }: _(RawOrigin::None, Box::new(raw_solution), witness) + verify { assert!(>::queued_solution().is_some()); } @@ -389,13 +372,8 @@ frame_benchmarking::benchmarks! { assert_eq!(raw_solution.solution.voter_count() as u32, a); assert_eq!(raw_solution.solution.unique_targets().len() as u32, d); - - // encode the most significant storage item that needs to be decoded in the dispatch. - let encoded_snapshot = >::snapshot().ok_or("snapshot missing")?.encode(); }: { assert_ok!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned)); - let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) - .expect("decoding should not fail; qed."); } // NOTE: this weight is not used anywhere, but the fact that it should succeed when execution in @@ -519,10 +497,10 @@ frame_benchmarking::benchmarks! 
{ log!(trace, "actual encoded size = {}", encoding.len()); assert!(encoding.len() <= desired_size); } -} -impl_benchmark_test_suite!( - MultiPhase, - crate::mock::ExtBuilder::default().build_offchainify(10).0, - crate::mock::Runtime, -); + impl_benchmark_test_suite!( + MultiPhase, + crate::mock::ExtBuilder::default().build_offchainify(10).0, + crate::mock::Runtime, + ); +} diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 269057b55b094..6b0329afc0d77 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -620,6 +620,15 @@ pub mod pallet { #[pallet::constant] type SignedDepositWeight: Get>; + /// The maximum number of voters to put in the snapshot. At the moment, snapshots are only + /// over a single block, but once multi-block elections are introduced they will take place + /// over multiple blocks. + /// + /// Also, note the data type: If the voters are represented by a `u32` in `type + /// CompactSolution`, the same `u32` is used here to ensure bounds are respected. + #[pallet::constant] + type VoterSnapshotPerBlock: Get>; + /// Handler for the slashed deposits. type SlashHandler: OnUnbalanced>; @@ -1274,7 +1283,8 @@ impl Pallet { fn create_snapshot_external( ) -> Result<(Vec, Vec>, u32), ElectionError> { let target_limit = >::max_value().saturated_into::(); - let voter_limit = >::max_value().saturated_into::(); + // for now we have just a single block snapshot. + let voter_limit = T::VoterSnapshotPerBlock::get().saturated_into::(); let targets = T::DataProvider::targets(Some(target_limit)).map_err(ElectionError::DataProvider)?; @@ -1307,8 +1317,10 @@ impl Pallet { let (targets, voters, desired_targets) = Self::create_snapshot_external()?; // ..therefore we only measure the weight of this and add it. 
+ let internal_weight = + T::WeightInfo::create_snapshot_internal(voters.len() as u32, targets.len() as u32); Self::create_snapshot_internal(targets, voters, desired_targets); - Self::register_weight(T::WeightInfo::create_snapshot_internal()); + Self::register_weight(internal_weight); Ok(()) } @@ -1933,7 +1945,8 @@ mod tests { } #[test] - fn snapshot_creation_fails_if_too_big() { + fn snapshot_too_big_failure_onchain_fallback() { + // the `MockStaking` is designed such that if it has too many targets, it simply fails. ExtBuilder::default().build_and_execute(|| { Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); @@ -1949,6 +1962,49 @@ mod tests { roll_to(29); let supports = MultiPhase::elect().unwrap(); assert!(supports.len() > 0); + }); + } + + #[test] + fn snapshot_too_big_failure_no_fallback() { + // and if the backup mode is nothing, we go into the emergency mode.. + ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { + crate::mock::Targets::set( + (0..(TargetIndex::max_value() as AccountId) + 1).collect::>(), + ); + + // Signed phase failed to open. + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + // Unsigned phase failed to open. + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + roll_to(29); + let err = MultiPhase::elect().unwrap_err(); + assert_eq!(err, ElectionError::Fallback("NoFallback.")); + assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + }); + } + + #[test] + fn snapshot_too_big_truncate() { + // but if there are too many voters, we simply truncate them. + ExtBuilder::default().build_and_execute(|| { + // we have 8 voters in total. + assert_eq!(crate::mock::Voters::get().len(), 8); + // but we want to take 2. + crate::mock::VoterSnapshotPerBlock::set(2); + + // Signed phase opens just fine. 
+ roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + + assert_eq!( + MultiPhase::snapshot_metadata().unwrap(), + SolutionOrSnapshotSize { voters: 2, targets: 4 } + ); }) } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 28a15291e6520..1a65316be1f10 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -268,6 +268,7 @@ parameter_types! { pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; pub static MinerMaxLength: u32 = 256; pub static MockWeightInfo: bool = false; + pub static VoterSnapshotPerBlock: VoterIndex = u32::max_value(); pub static EpochLength: u64 = 30; pub static OnChianFallback: bool = true; @@ -303,11 +304,11 @@ impl multi_phase::weights::WeightInfo for DualMockWeightInfo { <() as multi_phase::weights::WeightInfo>::on_initialize_nothing() } } - fn create_snapshot_internal() -> Weight { + fn create_snapshot_internal(v: u32, t: u32) -> Weight { if MockWeightInfo::get() { Zero::zero() } else { - <() as multi_phase::weights::WeightInfo>::create_snapshot_internal() + <() as multi_phase::weights::WeightInfo>::create_snapshot_internal(v, t) } } fn on_initialize_open_signed() -> Weight { @@ -401,6 +402,7 @@ impl crate::Config for Runtime { type Fallback = MockFallback; type ForceOrigin = frame_system::EnsureRoot; type Solution = TestNposSolution; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; type Solver = SequentialPhragmen, Balancing>; } @@ -433,9 +435,9 @@ impl ElectionDataProvider for StakingMock { fn voters( maybe_max_len: Option, ) -> data_provider::Result)>> { - let voters = Voters::get(); - if maybe_max_len.map_or(false, |max_len| voters.len() > max_len) { - return Err("Voters too big") + let mut voters = Voters::get(); + if let Some(max_len) = maybe_max_len { + voters.truncate(max_len) } Ok(voters) diff --git a/frame/election-provider-multi-phase/src/unsigned.rs 
b/frame/election-provider-multi-phase/src/unsigned.rs index af0b79177d86c..31ad502ac076e 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -651,7 +651,7 @@ mod max_weight { fn elect_queued(a: u32, d: u32) -> Weight { unreachable!() } - fn create_snapshot_internal() -> Weight { + fn create_snapshot_internal(v: u32, t: u32) -> Weight { unreachable!() } fn on_initialize_nothing() -> Weight { diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 262838bcb9e70..4d49f60fabfc3 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-18, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-09-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -50,7 +50,7 @@ pub trait WeightInfo { fn on_initialize_open_unsigned() -> Weight; fn finalize_signed_phase_accept_solution() -> Weight; fn finalize_signed_phase_reject_solution() -> Weight; - fn create_snapshot_internal() -> Weight; + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight; fn elect_queued(a: u32, d: u32, ) -> Weight; fn submit(c: u32, ) -> Weight; fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight; @@ -69,41 +69,45 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (23_878_000 as Weight) + (22_784_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (34_547_000 as Weight) + (32_763_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (33_568_000 as Weight) + (29_117_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (50_596_000 as Weight) + (48_996_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (33_389_000 as Weight) + (32_508_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as 
Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - fn create_snapshot_internal() -> Weight { - (8_835_233_000 as Weight) + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { + (96_001_000 as Weight) + // Standard Error: 1_000 + .saturating_add((307_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 2_000 + .saturating_add((133_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -116,11 +120,11 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (82_395_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_769_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 13_000 - .saturating_add((320_000 as Weight).saturating_mul(d as Weight)) + (100_505_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_665_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 10_000 + .saturating_add((443_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) } @@ -131,9 +135,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit(c: u32, ) -> Weight { - (77_368_000 as Weight) - // Standard Error: 9_000 - .saturating_add((369_000 as Weight).saturating_mul(c as Weight)) + (74_088_000 as Weight) + // Standard Error: 59_000 + .saturating_add((187_000 as Weight).saturating_mul(c as Weight)) 
.saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -146,14 +150,14 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 4_000 - .saturating_add((3_553_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 23_000 - .saturating_add((35_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 7_000 - .saturating_add((10_600_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 59_000 - .saturating_add((6_128_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 5_000 + .saturating_add((1_970_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 10_000 + .saturating_add((173_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 18_000 + .saturating_add((9_783_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 27_000 + .saturating_add((2_224_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -161,14 +165,16 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn feasibility_check(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 3_000 - .saturating_add((3_478_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 6_000 - .saturating_add((8_930_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 47_000 - .saturating_add((5_199_000 as Weight).saturating_mul(d as Weight)) + .saturating_add((1_910_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + 
.saturating_add((111_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 13_000 + .saturating_add((7_741_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 19_000 + .saturating_add((1_844_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) } } @@ -184,41 +190,45 @@ impl WeightInfo for () { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (23_878_000 as Weight) + (22_784_000 as Weight) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (34_547_000 as Weight) + (32_763_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (33_568_000 as Weight) + (29_117_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (50_596_000 as Weight) + (48_996_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (33_389_000 as Weight) + (32_508_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: 
ElectionProviderMultiPhase Snapshot (r:0 w:1) - fn create_snapshot_internal() -> Weight { - (8_835_233_000 as Weight) + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { + (96_001_000 as Weight) + // Standard Error: 1_000 + .saturating_add((307_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 2_000 + .saturating_add((133_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -231,11 +241,11 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (82_395_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_769_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 13_000 - .saturating_add((320_000 as Weight).saturating_mul(d as Weight)) + (100_505_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_665_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 10_000 + .saturating_add((443_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) } @@ -246,9 +256,9 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit(c: u32, ) -> Weight { - (77_368_000 as Weight) - // Standard Error: 9_000 - .saturating_add((369_000 as Weight).saturating_mul(c as Weight)) + (74_088_000 as Weight) + // Standard Error: 59_000 + .saturating_add((187_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -261,14 +271,14 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: 
u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 4_000 - .saturating_add((3_553_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 23_000 - .saturating_add((35_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 7_000 - .saturating_add((10_600_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 59_000 - .saturating_add((6_128_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 5_000 + .saturating_add((1_970_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 10_000 + .saturating_add((173_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 18_000 + .saturating_add((9_783_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 27_000 + .saturating_add((2_224_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -276,14 +286,16 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn feasibility_check(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 3_000 - .saturating_add((3_478_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 6_000 - .saturating_add((8_930_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 47_000 - .saturating_add((5_199_000 as Weight).saturating_mul(d as Weight)) + .saturating_add((1_910_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + .saturating_add((111_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 13_000 + .saturating_add((7_741_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 19_000 + .saturating_add((1_844_000 as Weight).saturating_mul(d as Weight)) 
.saturating_add(RocksDbWeight::get().reads(4 as Weight)) } } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index d2c4b1053cc6d..cb36e025c3bee 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -297,6 +297,75 @@ impl ElectionProvider for () { } } +/// A utility trait for something to implement `ElectionDataProvider` in a sensible way. +/// +/// This is generic over `AccountId` and it can represent a validator, a nominator, or any other +/// entity. +/// +/// To simplify the trait, the `VoteWeight` is hardcoded as the weight of each entity. The weights +/// are ascending, the higher, the better. In the long term, if this trait ends up having use cases +/// outside of the election context, it is easy enough to make it generic over the `VoteWeight`. +/// +/// Something that implements this trait will do a best-effort sort over ids, and thus can be +/// used on the implementing side of [`ElectionDataProvider`]. +pub trait SortedListProvider { + /// The list's error type. + type Error; + + /// An iterator over the list, which can have `take` called on it. + fn iter() -> Box>; + + /// The current count of ids in the list. + fn count() -> u32; + + /// Return true if the list already contains `id`. + fn contains(id: &AccountId) -> bool; + + /// Hook for inserting a new id. + fn on_insert(id: AccountId, weight: VoteWeight) -> Result<(), Self::Error>; + + /// Hook for updating a single id. + fn on_update(id: &AccountId, weight: VoteWeight); + + /// Hook for removing an id from the list. + fn on_remove(id: &AccountId); + + /// Regenerate this list from scratch. Returns the count of items inserted. + /// + /// This should typically only be used at a runtime upgrade. + fn regenerate( + all: impl IntoIterator, + weight_of: Box VoteWeight>, + ) -> u32; + + /// Remove `maybe_count` number of items from the list. Returns the number of items actually + /// removed.
WARNING: removes all items if `maybe_count` is `None`, which should never be done + /// in production settings because it can lead to an unbounded amount of storage accesses. + fn clear(maybe_count: Option) -> u32; + + /// Sanity check internal state of list. Only meant for debug compilation. + fn sanity_check() -> Result<(), &'static str>; + + /// If `who` changes by the returned amount they are guaranteed to have a worst case change + /// in their list position. + #[cfg(feature = "runtime-benchmarks")] + fn weight_update_worst_case(_who: &AccountId, _is_increase: bool) -> VoteWeight { + VoteWeight::MAX + } +} + +/// Something that can provide the `VoteWeight` of an account. Similar to [`ElectionProvider`] and +/// [`ElectionDataProvider`], this should typically be implemented by whoever is supposed to *use* +/// `SortedListProvider`. +pub trait VoteWeightProvider { + /// Get the current `VoteWeight` of `who`. + fn vote_weight(who: &AccountId) -> VoteWeight; + + /// For tests and benchmarks, set the `VoteWeight`. + #[cfg(any(feature = "runtime-benchmarks", test))] + fn set_vote_weight_of(_: &AccountId, _: VoteWeight) {} +} + /// Something that can compute the result to an NPoS solution. pub trait NposSolver { /// The account identifier type of this solver.
diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 7cb83b3dd7799..9bc63848607ab 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -21,10 +21,11 @@ use super::*; -use frame_benchmarking::{ - account, benchmarks, impl_benchmark_test_suite, whitelist, BenchmarkError, BenchmarkResult, +use frame_benchmarking::{account, benchmarks, whitelist, BenchmarkError, BenchmarkResult}; +use frame_support::{ + dispatch::{DispatchResultWithPostInfo, UnfilteredDispatchable}, + traits::OnInitialize, }; -use frame_support::{dispatch::DispatchResultWithPostInfo, traits::OnInitialize}; use frame_system::RawOrigin; use crate::Pallet as Elections; @@ -401,15 +402,23 @@ benchmarks! { let _ = fill_seats_up_to::(m)?; let removing = as_lookup::(>::members_ids()[0].clone()); + let who = T::Lookup::lookup(removing.clone()).expect("member was added above"); + let call = Call::::remove_member { who: removing, has_replacement: false }.encode(); }: { assert_eq!( - >::remove_member(RawOrigin::Root.into(), removing, false).unwrap_err().error, + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(RawOrigin::Root.into()) + .unwrap_err() + .error, Error::::InvalidReplacement.into(), ); } verify { // must still have enough members. assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); + // on fail, `who` must still be a member + assert!(>::members_ids().contains(&who)); #[cfg(test)] { // reset members in between benchmark tests. @@ -538,11 +547,11 @@ benchmarks! 
{ MEMBERS.with(|m| *m.borrow_mut() = vec![]); } } -} -impl_benchmark_test_suite!( - Elections, - crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), - crate::tests::Test, - exec_name = build_and_execute, -); + impl_benchmark_test_suite!( + Elections, + crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), + crate::tests::Test, + exec_name = build_and_execute, + ); +} diff --git a/frame/example/src/benchmarking.rs b/frame/example/src/benchmarking.rs index cdf6c152a4880..9f2bb20fe63ac 100644 --- a/frame/example/src/benchmarking.rs +++ b/frame/example/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use crate::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_system::RawOrigin; // To actually run this benchmark on pallet-example, we need to put this pallet into the @@ -65,12 +65,14 @@ benchmarks! { // The benchmark execution phase could also be a closure with custom code m.sort(); } -} -// This line generates test cases for benchmarking, and could be run by: -// `cargo test -p pallet-example --all-features`, you will see an additional line of: -// `test benchmarking::benchmark_tests::test_benchmarks ... ok` in the result. -// -// The line generates three steps per benchmark, with repeat=1 and the three steps are -// [low, mid, high] of the range. -impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); + // This line generates test cases for benchmarking, and could be run by: + // `cargo test -p pallet-example --all-features`, you will see one line per case: + // `test benchmarking::bench_sort_vector ... ok` + // `test benchmarking::bench_accumulate_dummy ... ok` + // `test benchmarking::bench_set_dummy_benchmark ... ok` in the result. 
+ // + // The line generates three steps per benchmark, with repeat=1 and the three steps are + // [low, mid, high] of the range. + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test) +} diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 3f56b57dac8ca..23c4951c1a603 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -605,6 +605,9 @@ pub mod pallet { #[pallet::getter(fn foo)] pub(super) type Foo = StorageValue<_, T::Balance, ValueQuery>; + #[pallet::storage] + pub type CountedMap = CountedStorageMap<_, Blake2_128Concat, u8, u16>; + // The genesis config type. #[pallet::genesis_config] pub struct GenesisConfig { diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs index 87c2404f5b100..4c2274572db81 100644 --- a/frame/example/src/tests.rs +++ b/frame/example/src/tests.rs @@ -180,6 +180,15 @@ fn signed_ext_watch_dummy_works() { }) } +#[test] +fn counted_map_works() { + new_test_ext().execute_with(|| { + assert_eq!(CountedMap::::count(), 0); + CountedMap::::insert(3, 3); + assert_eq!(CountedMap::::count(), 1); + }) +} + #[test] fn weights_work() { // must have a defined weight. diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 655a38fe1b540..41f679909e6fd 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -220,6 +220,37 @@ where weight } + /// Execute given block, but don't do any of the [`final_checks`]. + /// + /// Should only be used for testing. + #[cfg(feature = "try-runtime")] + pub fn execute_block_no_check(block: Block) -> frame_support::weights::Weight { + Self::initialize_block(block.header()); + Self::initial_checks(&block); + + let (header, extrinsics) = block.deconstruct(); + + Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); + + // do some of the checks that would normally happen in `final_checks`, but definitely skip + // the state root check. 
+ { + let new_header = >::finalize(); + let items_zip = header.digest().logs().iter().zip(new_header.digest().logs().iter()); + for (header_item, computed_item) in items_zip { + header_item.check_equal(&computed_item); + assert!(header_item == computed_item, "Digest item must match that calculated."); + } + + assert!( + header.extrinsics_root() == new_header.extrinsics_root(), + "Transaction trie root must be valid.", + ); + } + + frame_system::Pallet::::block_weight().total() + } + /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks. /// /// This should only be used for testing. @@ -229,7 +260,7 @@ where (frame_system::Pallet::, COnRuntimeUpgrade, AllPallets) as OnRuntimeUpgrade - >::pre_upgrade()?; + >::pre_upgrade().unwrap(); let weight = Self::execute_on_runtime_upgrade(); @@ -237,7 +268,7 @@ where (frame_system::Pallet::, COnRuntimeUpgrade, AllPallets) as OnRuntimeUpgrade - >::post_upgrade()?; + >::post_upgrade().unwrap(); Ok(weight) } @@ -544,15 +575,9 @@ where #[cfg(test)] mod tests { use super::*; - use frame_support::{ - assert_err, parameter_types, - traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, - weights::{IdentityFee, RuntimeDbWeight, Weight, WeightToFeePolynomial}, - }; - use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use hex_literal::hex; - use pallet_balances::Call as BalancesCall; - use pallet_transaction_payment::CurrencyAdapter; + use sp_core::H256; use sp_runtime::{ generic::{DigestItem, Era}, @@ -563,95 +588,135 @@ mod tests { }, DispatchError, }; + + use frame_support::{ + assert_err, parameter_types, + traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, + weights::{IdentityFee, RuntimeDbWeight, Weight, WeightToFeePolynomial}, + }; + use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use pallet_balances::Call as BalancesCall; + use pallet_transaction_payment::CurrencyAdapter; + const TEST_KEY: 
&[u8] = &*b":test:key:"; + #[frame_support::pallet] mod custom { - use frame_support::weights::{DispatchClass, Weight}; - use sp_runtime::transaction_validity::{ - TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, - }; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + #[pallet::config] pub trait Config: frame_system::Config {} - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = 100] - fn some_function(origin) { - // NOTE: does not make any different. - frame_system::ensure_signed(origin)?; - } - #[weight = (200, DispatchClass::Operational)] - fn some_root_operation(origin) { - frame_system::ensure_root(origin)?; - } - #[weight = 0] - fn some_unsigned_message(origin) { - frame_system::ensure_none(origin)?; - } + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. + // one with block number arg and one without + fn on_initialize(n: T::BlockNumber) -> Weight { + println!("on_initialize({})", n); + 175 + } - #[weight = 0] - fn allowed_unsigned(origin) { - frame_system::ensure_root(origin)?; - } + fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { + println!("on_idle{}, {})", n, remaining_weight); + 175 + } - #[weight = 0] - fn unallowed_unsigned(origin) { - frame_system::ensure_root(origin)?; - } + fn on_finalize(n: T::BlockNumber) { + println!("on_finalize({})", n); + } - #[weight = 0] - fn inherent_call(origin) { - let _ = frame_system::ensure_none(origin)?; - } + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + 200 + } - // module hooks. 
- // one with block number arg and one without - fn on_initialize(n: T::BlockNumber) -> Weight { - println!("on_initialize({})", n); - 175 - } + fn offchain_worker(n: T::BlockNumber) { + assert_eq!(T::BlockNumber::from(1u32), n); + } + } - fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { - println!("on_idle{}, {})", n, remaining_weight); - 175 - } + #[pallet::call] + impl Pallet { + #[pallet::weight(100)] + pub fn some_function(origin: OriginFor) -> DispatchResult { + // NOTE: does not make any different. + frame_system::ensure_signed(origin)?; + Ok(()) + } - fn on_finalize() { - println!("on_finalize(?)"); - } + #[pallet::weight((200, DispatchClass::Operational))] + pub fn some_root_operation(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - 200 - } + #[pallet::weight(0)] + pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } - fn offchain_worker(n: T::BlockNumber) { - assert_eq!(T::BlockNumber::from(1u32), n); - } + #[pallet::weight(0)] + pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } - #[weight = 0] - fn calculate_storage_root(_origin) { - let root = sp_io::storage::root(); - sp_io::storage::set("storage_root".as_bytes(), &root); - } + #[pallet::weight(0)] + pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + #[pallet::weight(0)] + pub fn inherent_call(origin: OriginFor) -> DispatchResult { + let _ = frame_system::ensure_none(origin)?; + Ok(()) + } + + #[pallet::weight(0)] + pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { + let root = sp_io::storage::root(); + sp_io::storage::set("storage_root".as_bytes(), &root); + Ok(()) } } - impl frame_support::inherent::ProvideInherent for Module { 
+ #[pallet::inherent] + impl ProvideInherent for Pallet { type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; - fn create_inherent(_data: &sp_inherents::InherentData) -> Option { + + fn create_inherent(_data: &InherentData) -> Option { None } + fn is_inherent(call: &Self::Call) -> bool { *call == Call::::inherent_call {} } } - impl sp_runtime::traits::ValidateUnsigned for Module { + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { type Call = Call; + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } => Ok(()), + Call::inherent_call { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } + // Inherent call is not validated as unsigned fn validate_unsigned( _source: TransactionSource, @@ -662,15 +727,6 @@ mod tests { _ => UnknownTransaction::NoUnsignedValidator.into(), } } - - // Inherent call is accepted for being dispatched - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - match call { - Call::allowed_unsigned { .. } => Ok(()), - Call::inherent_call { .. } => Ok(()), - _ => Err(UnknownTransaction::NoUnsignedValidator.into()), - } - } } } diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs index 55d34a35a7ce4..9c6d22a48398d 100644 --- a/frame/gilt/src/benchmarking.rs +++ b/frame/gilt/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{ dispatch::UnfilteredDispatchable, traits::{Currency, EnsureOrigin, Get}, @@ -50,17 +50,12 @@ benchmarks! 
{ place_bid_max { let caller: T::AccountId = whitelisted_caller(); + let origin = RawOrigin::Signed(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); for i in 0..T::MaxQueueLen::get() { - Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + Gilt::::place_bid(origin.clone().into(), T::MinFreeze::get(), 1)?; } - }: { - Gilt::::place_bid( - RawOrigin::Signed(caller.clone()).into(), - T::MinFreeze::get() * BalanceOf::::from(2u32), - 1, - )? - } + }: place_bid(origin, T::MinFreeze::get() * BalanceOf::::from(2u32), 1) verify { assert_eq!(QueueTotals::::get()[0], ( T::MaxQueueLen::get(), @@ -81,9 +76,9 @@ benchmarks! { } set_target { - let call = Call::::set_target { target: Default::default() }; let origin = T::AdminOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, Default::default()) + verify {} thaw { let caller: T::AccountId = whitelisted_caller(); @@ -131,6 +126,6 @@ benchmarks! { .dispatch_bypass_filter(T::AdminOrigin::successful_origin())?; }: { Gilt::::pursue_target(q) } -} -impl_benchmark_test_suite!(Gilt, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Gilt, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/grandpa/src/benchmarking.rs b/frame/grandpa/src/benchmarking.rs index b0f70adb6061d..1e6be01ce8dbf 100644 --- a/frame/grandpa/src/benchmarking.rs +++ b/frame/grandpa/src/benchmarking.rs @@ -17,8 +17,6 @@ //! Benchmarks for the GRANDPA pallet. -#![cfg_attr(not(feature = "std"), no_std)] - use super::{Pallet as Grandpa, *}; use frame_benchmarking::benchmarks; use frame_system::RawOrigin; @@ -70,6 +68,12 @@ benchmarks! 
{ verify { assert!(Grandpa::::stalled().is_some()); } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), + crate::mock::Test, + ); } #[cfg(test)] @@ -77,12 +81,6 @@ mod tests { use super::*; use crate::mock::*; - frame_benchmarking::impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), - crate::mock::Test, - ); - #[test] fn test_generate_equivocation_report_blob() { let authorities = crate::tests::test_authorities(); diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index cd75deea770b4..687207151f4f4 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -33,7 +33,7 @@ pub use sp_finality_grandpa as fg_primitives; use sp_std::prelude::*; -use codec::{self as codec, Decode, Encode}; +use codec::{self as codec, Decode, Encode, MaxEncodedLen}; pub use fg_primitives::{AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList}; use fg_primitives::{ ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, @@ -41,9 +41,11 @@ use fg_primitives::{ }; use frame_support::{ dispatch::DispatchResultWithPostInfo, + pallet_prelude::Get, storage, traits::{KeyOwnerProofSystem, OneSessionHandler, StorageVersion}, weights::{Pays, Weight}, + WeakBoundedVec, }; use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult, KeyTypeId}; use sp_session::{GetSessionNumber, GetValidatorCount}; @@ -81,6 +83,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::generate_storage_info] pub struct Pallet(_); #[pallet::config] @@ -119,6 +122,10 @@ pub mod pallet { /// Weights for this pallet. 
type WeightInfo: WeightInfo; + + /// Max Authorities in use + #[pallet::constant] + type MaxAuthorities: Get; } #[pallet::hooks] @@ -133,13 +140,13 @@ pub mod pallet { median, ScheduledChange { delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), + next_authorities: pending_change.next_authorities.to_vec(), }, )) } else { Self::deposit_log(ConsensusLog::ScheduledChange(ScheduledChange { delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), + next_authorities: pending_change.next_authorities.to_vec(), })); } } @@ -147,7 +154,9 @@ pub mod pallet { // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { Self::set_grandpa_authorities(&pending_change.next_authorities); - Self::deposit_event(Event::NewAuthorities(pending_change.next_authorities)); + Self::deposit_event(Event::NewAuthorities( + pending_change.next_authorities.to_vec(), + )); >::kill(); } } @@ -291,7 +300,8 @@ pub mod pallet { /// Pending change: (signaled at, scheduled change). #[pallet::storage] #[pallet::getter(fn pending_change)] - pub(super) type PendingChange = StorageValue<_, StoredPendingChange>; + pub(super) type PendingChange = + StorageValue<_, StoredPendingChange>; /// next block number where we can force a change. #[pallet::storage] @@ -355,15 +365,25 @@ pub trait WeightInfo { fn note_stalled() -> Weight; } +/// Bounded version of `AuthorityList`, `Limit` being the bound +pub type BoundedAuthorityList = WeakBoundedVec<(AuthorityId, AuthorityWeight), Limit>; + /// A stored pending change. -#[derive(Encode, Decode, TypeInfo)] -pub struct StoredPendingChange { +/// `Limit` is the bound for `next_authorities` +#[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] +#[codec(mel_bound(Limit: Get))] +#[scale_info(skip_type_params(Limit))] +pub struct StoredPendingChange +where + Limit: Get, + N: MaxEncodedLen, +{ /// The block number this was scheduled at. 
pub scheduled_at: N, /// The delay in blocks until it will be applied. pub delay: N, - /// The next authority set. - pub next_authorities: AuthorityList, + /// The next authority set, weakly bounded in size by `Limit`. + pub next_authorities: BoundedAuthorityList, /// If defined it means the change was forced and the given block number /// indicates the median last finalized block when the change was signaled. pub forced: Option, @@ -372,7 +392,7 @@ pub struct StoredPendingChange { /// Current state of the GRANDPA authority set. State transitions must happen in /// the same order of states defined below, e.g. `Paused` implies a prior /// `PendingPause`. -#[derive(Decode, Encode, TypeInfo)] +#[derive(Decode, Encode, TypeInfo, MaxEncodedLen)] #[cfg_attr(test, derive(Debug, PartialEq))] pub enum StoredState { /// The current authority set is live, and GRANDPA is enabled. @@ -465,6 +485,14 @@ impl Pallet { >::put(scheduled_at + in_blocks * 2u32.into()); } + let next_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The number of authorities given is too big. \ + A runtime configuration adjustment may be needed.", + ), + ); + >::put(StoredPendingChange { delay: in_blocks, scheduled_at, diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 26dda514516a3..4e5e44ce36e7a 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -217,6 +217,7 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; + type SortedListProvider = pallet_staking::UseNominatorsMap; type WeightInfo = (); } @@ -229,6 +230,7 @@ impl pallet_offences::Config for Test { parameter_types! 
{ pub const ReportLongevity: u64 = BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * Period::get(); + pub const MaxAuthorities: u32 = 100; } impl Config for Test { @@ -249,6 +251,7 @@ impl Config for Test { super::EquivocationHandler; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } pub fn grandpa_log(log: ConsensusLog) -> DigestItem { diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 8bda24ddc73e1..68869a43992f9 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use crate::Pallet as Identity; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::{ensure, traits::Get}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -411,6 +411,5 @@ benchmarks! { ensure!(!SuperOf::::contains_key(&caller), "Sub not removed"); } + impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test); } - -impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index 1043a97f67def..012da53a183e5 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -21,8 +21,8 @@ use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; -use frame_support::traits::UnfilteredDispatchable; +use frame_benchmarking::benchmarks; +use frame_support::{traits::UnfilteredDispatchable, WeakBoundedVec}; use frame_system::RawOrigin; use sp_core::{offchain::OpaqueMultiaddr, OpaquePeerId}; use sp_runtime::{ @@ -46,7 +46,9 @@ pub fn create_heartbeat( for _ in 0..k { keys.push(T::AuthorityId::generate_pair(None)); } - Keys::::put(keys.clone()); + let bounded_keys = WeakBoundedVec::<_, T::MaxKeys>::try_from(keys.clone()) + .map_err(|()| "More 
than the maximum number of keys provided")?; + Keys::::put(bounded_keys); let network_state = OpaqueNetworkState { peer_id: OpaquePeerId::default(), @@ -91,11 +93,13 @@ benchmarks! { let e in 1 .. MAX_EXTERNAL_ADDRESSES; let (input_heartbeat, signature) = create_heartbeat::(k, e)?; let call = Call::heartbeat { heartbeat: input_heartbeat, signature }; + let call_enc = call.encode(); }: { - ImOnline::::validate_unsigned(TransactionSource::InBlock, &call) - .map_err(<&str>::from)?; - call.dispatch_bypass_filter(RawOrigin::None.into())?; + ImOnline::::validate_unsigned(TransactionSource::InBlock, &call).map_err(<&str>::from)?; + as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(RawOrigin::None.into())?; } -} -impl_benchmark_test_suite!(ImOnline, crate::mock::new_test_ext(), crate::mock::Runtime); + impl_benchmark_test_suite!(ImOnline, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index ab4f7001574e5..2fcaed1820ff9 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -74,9 +74,14 @@ mod mock; mod tests; pub mod weights; -use codec::{Decode, Encode}; -use frame_support::traits::{ - EstimateNextSessionRotation, OneSessionHandler, ValidatorSet, ValidatorSetWithIdentification, +use codec::{Decode, Encode, MaxEncodedLen}; +use core::convert::TryFrom; +use frame_support::{ + traits::{ + EstimateNextSessionRotation, Get, OneSessionHandler, ValidatorSet, + ValidatorSetWithIdentification, WrapperOpaque, + }, + BoundedSlice, WeakBoundedVec, }; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; pub use pallet::*; @@ -220,6 +225,65 @@ where pub validators_len: u32, } +/// A type that is the same as [`OpaqueNetworkState`] but with [`Vec`] replaced with +/// [`WeakBoundedVec`] where Limit is the respective size limit +/// `PeerIdEncodingLimit` represents the size limit of the encoding of 
`PeerId` +/// `MultiAddrEncodingLimit` represents the size limit of the encoding of `MultiAddr` +/// `AddressesLimit` represents the size limit of the vector of peers connected +#[derive(Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo)] +#[codec(mel_bound(PeerIdEncodingLimit: Get, + MultiAddrEncodingLimit: Get, AddressesLimit: Get))] +#[scale_info(skip_type_params(PeerIdEncodingLimit, MultiAddrEncodingLimit, AddressesLimit))] +pub struct BoundedOpaqueNetworkState +where + PeerIdEncodingLimit: Get, + MultiAddrEncodingLimit: Get, + AddressesLimit: Get, +{ + /// PeerId of the local node in SCALE encoded. + pub peer_id: WeakBoundedVec, + /// List of addresses the node knows it can be reached as. + pub external_addresses: + WeakBoundedVec, AddressesLimit>, +} + +impl, MultiAddrEncodingLimit: Get, AddressesLimit: Get> + BoundedOpaqueNetworkState +{ + fn force_from(ons: &OpaqueNetworkState) -> Self { + let peer_id = WeakBoundedVec::<_, PeerIdEncodingLimit>::force_from( + ons.peer_id.0.clone(), + Some( + "Warning: The size of the encoding of PeerId \ + is bigger than expected. A runtime configuration \ + adjustment may be needed.", + ), + ); + + let external_addresses = WeakBoundedVec::<_, AddressesLimit>::force_from( + ons.external_addresses + .iter() + .map(|x| { + WeakBoundedVec::<_, MultiAddrEncodingLimit>::force_from( + x.0.clone(), + Some( + "Warning: The size of the encoding of MultiAddr \ + is bigger than expected. A runtime configuration \ + adjustment may be needed.", + ), + ) + }) + .collect(), + Some( + "Warning: The network has more peers than expected \ + A runtime configuration adjustment may be needed.", + ), + ); + + Self { peer_id, external_addresses } + } +} + /// A type for representing the validator id in a session. 
pub type ValidatorId = <::ValidatorSet as ValidatorSet< ::AccountId, @@ -251,6 +315,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] pub struct Pallet(_); #[pallet::config] @@ -261,7 +326,18 @@ pub mod pallet { + RuntimeAppPublic + Default + Ord - + MaybeSerializeDeserialize; + + MaybeSerializeDeserialize + + MaxEncodedLen; + + /// The maximum number of keys that can be added. + type MaxKeys: Get; + + /// The maximum number of peers to be stored in `ReceivedHeartbeats` + type MaxPeerInHeartbeats: Get; + + /// The maximum size of the encoding of `PeerId` and `MultiAddr` that are coming + /// from the hearbeat + type MaxPeerDataEncodingSize: Get; /// The overarching event type. type Event: From> + IsType<::Event>; @@ -333,14 +409,27 @@ pub mod pallet { /// The current set of keys that may issue a heartbeat. #[pallet::storage] #[pallet::getter(fn keys)] - pub(crate) type Keys = StorageValue<_, Vec, ValueQuery>; + pub(crate) type Keys = + StorageValue<_, WeakBoundedVec, ValueQuery>; - /// For each session index, we keep a mapping of `AuthIndex` to - /// `offchain::OpaqueNetworkState`. + /// For each session index, we keep a mapping of 'SessionIndex` and `AuthIndex` to + /// `WrapperOpaque`. #[pallet::storage] #[pallet::getter(fn received_heartbeats)] - pub(crate) type ReceivedHeartbeats = - StorageDoubleMap<_, Twox64Concat, SessionIndex, Twox64Concat, AuthIndex, Vec>; + pub(crate) type ReceivedHeartbeats = StorageDoubleMap< + _, + Twox64Concat, + SessionIndex, + Twox64Concat, + AuthIndex, + WrapperOpaque< + BoundedOpaqueNetworkState< + T::MaxPeerDataEncodingSize, + T::MaxPeerDataEncodingSize, + T::MaxPeerInHeartbeats, + >, + >, + >; /// For each session index, we keep a mapping of `ValidatorId` to the /// number of blocks authored by the given authority. 
@@ -409,11 +498,15 @@ pub mod pallet { if let (false, Some(public)) = (exists, public) { Self::deposit_event(Event::::HeartbeatReceived(public.clone())); - let network_state = heartbeat.network_state.encode(); + let network_state_bounded = BoundedOpaqueNetworkState::< + T::MaxPeerDataEncodingSize, + T::MaxPeerDataEncodingSize, + T::MaxPeerInHeartbeats, + >::force_from(&heartbeat.network_state); ReceivedHeartbeats::::insert( ¤t_session, &heartbeat.authority_index, - &network_state, + WrapperOpaque::from(network_state_bounded), ); Ok(()) @@ -739,13 +832,17 @@ impl Pallet { fn initialize_keys(keys: &[T::AuthorityId]) { if !keys.is_empty() { assert!(Keys::::get().is_empty(), "Keys are already initialized!"); - Keys::::put(keys); + let bounded_keys = >::try_from(keys) + .expect("More than the maximum number of keys provided"); + Keys::::put(bounded_keys); } } #[cfg(test)] fn set_keys(keys: Vec) { - Keys::::put(&keys) + let bounded_keys = WeakBoundedVec::<_, T::MaxKeys>::try_from(keys) + .expect("More than the maximum number of keys provided"); + Keys::::put(bounded_keys); } } @@ -776,7 +873,15 @@ impl OneSessionHandler for Pallet { >::put(block_number + half_session); // Remember who the authorities are for the new session. - Keys::::put(validators.map(|x| x.1).collect::>()); + let keys = validators.map(|x| x.1).collect::>(); + let bounded_keys = WeakBoundedVec::<_, T::MaxKeys>::force_from( + keys, + Some( + "Warning: The session has more keys than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + Keys::::put(bounded_keys); } fn on_before_session_ending() { diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index e4031b04271b9..92d1fe8e3f8b9 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -217,6 +217,9 @@ impl frame_support::traits::EstimateNextSessionRotation for TestNextSession parameter_types! 
{ pub const UnsignedPriority: u64 = 1 << 20; + pub const MaxKeys: u32 = 10_000; + pub const MaxPeerInHeartbeats: u32 = 10_000; + pub const MaxPeerDataEncodingSize: u32 = 1_000; } impl Config for Runtime { @@ -227,6 +230,9 @@ impl Config for Runtime { type ReportUnresponsiveness = OffenceHandler; type UnsignedPriority = UnsignedPriority; type WeightInfo = (); + type MaxKeys = MaxKeys; + type MaxPeerInHeartbeats = MaxPeerInHeartbeats; + type MaxPeerDataEncodingSize = MaxPeerDataEncodingSize; } impl frame_system::offchain::SendTransactionTypes for Runtime diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index ba0152008c41e..873dc18b20265 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -91,6 +91,6 @@ benchmarks! { } // TODO in another PR: lookup and unlookup trait weights (not critical) -} -impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs index 3b7035c72deb0..5407e16cd633f 100644 --- a/frame/lottery/src/benchmarking.rs +++ b/frame/lottery/src/benchmarking.rs @@ -21,8 +21,8 @@ use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; -use frame_support::traits::{EnsureOrigin, OnInitialize, UnfilteredDispatchable}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; +use frame_support::traits::{EnsureOrigin, OnInitialize}; use frame_system::RawOrigin; use sp_runtime::traits::{Bounded, Zero}; @@ -73,11 +73,9 @@ benchmarks! 
{ set_calls { let n in 0 .. T::MaxCalls::get() as u32; let calls = vec![frame_system::Call::::remark { remark: vec![] }.into(); n as usize]; - - let call = Call::::set_calls { calls }; let origin = T::ManagerOrigin::successful_origin(); assert!(CallIndices::::get().is_empty()); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, calls) verify { if !n.is_zero() { assert!(!CallIndices::::get().is_empty()); @@ -88,10 +86,8 @@ benchmarks! { let price = BalanceOf::::max_value(); let end = 10u32.into(); let payout = 5u32.into(); - - let call = Call::::start_lottery { price, length: end, delay: payout, repeat: true }; let origin = T::ManagerOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, price, end, payout, true) verify { assert!(crate::Lottery::::get().is_some()); } @@ -99,9 +95,8 @@ benchmarks! { stop_repeat { setup_lottery::(true)?; assert_eq!(crate::Lottery::::get().unwrap().repeat, true); - let call = Call::::stop_repeat {}; let origin = T::ManagerOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin) verify { assert_eq!(crate::Lottery::::get().unwrap().repeat, false); } @@ -168,6 +163,6 @@ benchmarks! 
{ assert_eq!(Lottery::::pot().1, 0u32.into()); assert!(!T::Currency::free_balance(&winner).is_zero()) } -} -impl_benchmark_test_suite!(Lottery, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Lottery, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 7922d9efaf569..8fa2abb0ad3f3 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -357,9 +357,7 @@ impl, I: 'static> SortedMembers for Pallet { #[cfg(feature = "runtime-benchmarks")] mod benchmark { use super::{Pallet as Membership, *}; - use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist, - }; + use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist}; use frame_support::{assert_ok, traits::EnsureOrigin}; use frame_system::RawOrigin; @@ -494,9 +492,9 @@ mod benchmark { assert!(::get_prime().is_none()); #[cfg(test)] crate::tests::clean(); } - } - impl_benchmark_test_suite!(Membership, crate::tests::new_bench_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Membership, crate::tests::new_bench_ext(), crate::tests::Test); + } } #[cfg(test)] @@ -790,9 +788,16 @@ mod tests { fn migration_v4() { new_test_ext().execute_with(|| { use frame_support::traits::PalletInfo; - let old_pallet_name = + let old_pallet_name = "OldMembership"; + let new_pallet_name = ::PalletInfo::name::().unwrap(); - let new_pallet_name = "NewMembership"; + + frame_support::storage::migration::move_pallet( + new_pallet_name.as_bytes(), + old_pallet_name.as_bytes(), + ); + + StorageVersion::new(0).put::(); crate::migrations::v4::pre_migrate::(old_pallet_name, new_pallet_name); crate::migrations::v4::migrate::(old_pallet_name, new_pallet_name); diff --git a/frame/membership/src/migrations/v4.rs b/frame/membership/src/migrations/v4.rs index 9f4b15e468b38..c1c944be1fd4f 100644 --- a/frame/membership/src/migrations/v4.rs +++ 
b/frame/membership/src/migrations/v4.rs @@ -15,8 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_core::hexdisplay::HexDisplay; -use sp_io::{hashing::twox_128, storage}; +use sp_io::hashing::twox_128; use frame_support::{ traits::{ @@ -85,28 +84,22 @@ pub fn pre_migrate>(old_pallet_name: N, new_ let new_pallet_name = new_pallet_name.as_ref(); log_migration("pre-migration", old_pallet_name, new_pallet_name); - let old_pallet_prefix = twox_128(old_pallet_name.as_bytes()); - assert!(storage::next_key(&old_pallet_prefix) - .map_or(true, |next_key| next_key.starts_with(&old_pallet_prefix))); + if new_pallet_name == old_pallet_name { + return + } let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); - let storage_version_key = - [&new_pallet_prefix, &twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX)[..]].concat(); - // ensure nothing is stored in the new prefix. - assert!( - storage::next_key(&new_pallet_prefix).map_or( - // either nothing is there - true, - // or we ensure that it has no common prefix with twox_128(new), - // or isn't the storage version that is already stored using the pallet name - |next_key| { - !next_key.starts_with(&new_pallet_prefix) || next_key == storage_version_key - }, - ), - "unexpected next_key({}) = {:?}", - new_pallet_name, - HexDisplay::from(&storage::next_key(&new_pallet_prefix).unwrap()), + let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX); + + let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new( + new_pallet_prefix.to_vec(), + new_pallet_prefix.to_vec(), + |key| Ok(key.to_vec()), ); + + // Ensure nothing except maybe the storage_version_key is stored in the new prefix. + assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key)); + assert!(

::on_chain_storage_version() < 4); } @@ -119,26 +112,27 @@ pub fn post_migrate>(old_pallet_name: N, new let new_pallet_name = new_pallet_name.as_ref(); log_migration("post-migration", old_pallet_name, new_pallet_name); - let old_pallet_prefix = twox_128(old_pallet_name.as_bytes()); - #[cfg(test)] - { - let storage_version_key = - [&old_pallet_prefix, &twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX)[..]].concat(); - assert!(storage::next_key(&old_pallet_prefix) - .map_or(true, |next_key| !next_key.starts_with(&old_pallet_prefix) || - next_key == storage_version_key)); - } - #[cfg(not(test))] - { - // Assert that nothing remains at the old prefix - assert!(storage::next_key(&old_pallet_prefix) - .map_or(true, |next_key| !next_key.starts_with(&old_pallet_prefix))); + if new_pallet_name == old_pallet_name { + return } + // Assert that nothing remains at the old prefix. + let old_pallet_prefix = twox_128(old_pallet_name.as_bytes()); + let old_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new( + old_pallet_prefix.to_vec(), + old_pallet_prefix.to_vec(), + |_| Ok(()), + ); + assert_eq!(old_pallet_prefix_iter.count(), 0); + + // NOTE: storage_version_key is already in the new prefix. let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); - // Assert that the storages have been moved to the new prefix - assert!(storage::next_key(&new_pallet_prefix) - .map_or(true, |next_key| next_key.starts_with(&new_pallet_prefix))); + let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new( + new_pallet_prefix.to_vec(), + new_pallet_prefix.to_vec(), + |_| Ok(()), + ); + assert!(new_pallet_prefix_iter.count() >= 1); assert_eq!(

::on_chain_storage_version(), 4); } diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs index 2680b3d030067..d6ef76d01ac3a 100644 --- a/frame/merkle-mountain-range/src/benchmarking.rs +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -17,10 +17,8 @@ //! Benchmarks for the MMR pallet. -#![cfg_attr(not(feature = "std"), no_std)] - use crate::*; -use frame_benchmarking::{benchmarks_instance_pallet, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks_instance_pallet; use frame_support::traits::OnInitialize; benchmarks_instance_pallet! { @@ -35,6 +33,6 @@ benchmarks_instance_pallet! { } verify { assert_eq!(crate::NumberOfLeaves::::get(), leaves); } -} -impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::mock::Test); +} diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 2e23dff156e07..edfeba253e5f0 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use core::convert::TryInto; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -297,6 +297,6 @@ benchmarks! 
{ assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); assert!(!Calls::::contains_key(call_hash)); } -} -impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 35e3c1aec9403..dde8aa92c2405 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -23,7 +23,7 @@ mod mock; use sp_std::{prelude::*, vec}; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks}; use frame_support::traits::{Currency, ValidatorSet, ValidatorSetWithIdentification}; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; @@ -399,6 +399,6 @@ benchmarks! { + n // nominators slashed ); } -} -impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index c4fd88def0e33..6973e25371d4f 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -146,6 +146,9 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const MaxKeys: u32 = 10_000; + pub const MaxPeerInHeartbeats: u32 = 10_000; + pub const MaxPeerDataEncodingSize: u32 = 1_000; } pub type Extrinsic = sp_runtime::testing::TestXt; @@ -174,6 +177,7 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; + type SortedListProvider = pallet_staking::UseNominatorsMap; type WeightInfo = (); } @@ -185,6 +189,9 @@ impl pallet_im_online::Config for Test { type ReportUnresponsiveness = Offences; type UnsignedPriority = (); type WeightInfo = (); + type MaxKeys = MaxKeys; + type MaxPeerInHeartbeats = MaxPeerInHeartbeats; + type MaxPeerDataEncodingSize = MaxPeerDataEncodingSize; } impl pallet_offences::Config for Test { diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 83db82990d105..4da712dadf27b 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -42,5 +42,6 @@ std = [ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index e66f6782c19e1..1eb3ec5770544 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use crate::Pallet as Proxy; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -245,6 +245,6 @@ benchmarks! 
{ verify { assert!(!Proxies::::contains_key(&anon)); } -} -impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 2c164eaede229..1065f17027744 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_support::{ensure, traits::OnInitialize}; use frame_system::RawOrigin; use sp_std::{prelude::*, vec}; @@ -139,6 +139,6 @@ benchmarks! { "didn't append schedule" ); } -} -impl_benchmark_test_suite!(Scheduler, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Scheduler, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 8f07de2e7a6db..3d2de5339543e 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -13,38 +13,40 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ - "derive", -] } +log = { version = "0.4.0", default-features = false } +impl-trait-for-tuples = "0.2.1" + +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } -sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } + sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } sp-io = { version = "4.0.0-dev", default-features = false, path = 
"../../primitives/io" } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +sp-trie = { version = "4.0.0-dev", default-features = false, path = "../../primitives/trie", optional = true } + frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -sp-trie = { version = "4.0.0-dev", optional = true, default-features = false, path = "../../primitives/trie" } -log = { version = "0.4.0", default-features = false } -impl-trait-for-tuples = "0.2.1" [features] default = ["std", "historical"] historical = ["sp-trie"] std = [ + "log/std", "codec/std", "scale-info/std", "sp-std/std", - "sp-io/std", - "frame-support/std", "sp-core/std", + "sp-io/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", - "pallet-timestamp/std", "sp-trie/std", - "log/std", + "frame-support/std", + "frame-system/std", + "pallet-timestamp/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/session/README.md b/frame/session/README.md index c47b5610de09c..09132470d4433 100644 --- a/frame/session/README.md +++ b/frame/session/README.md @@ -1,11 +1,11 @@ -# Session Module +# Session Pallet The Session module allows validators to manage their session keys, provides a function for changing the session length, and handles session rotation. 
- [`session::Trait`](https://docs.rs/pallet-session/latest/pallet_session/trait.Config.html) - [`Call`](https://docs.rs/pallet-session/latest/pallet_session/enum.Call.html) -- [`Module`](https://docs.rs/pallet-session/latest/pallet_session/struct.Module.html) +- [`Pallet`](https://docs.rs/pallet-session/latest/pallet_session/struct.Pallet.html) ## Overview @@ -72,11 +72,11 @@ The [Staking pallet](https://docs.rs/pallet-staking/latest/pallet_staking/) uses use pallet_session as session; fn validators() -> Vec<::ValidatorId> { - >::validators() + >::validators() } ``` -## Related Modules +## Related Pallets - [Staking](https://docs.rs/pallet-staking/latest/pallet_staking/) diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index cc242085bf5e4..a24d4a1173ab1 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -13,39 +13,37 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } -sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +rand = { version = "0.7.2", default-features = false } + sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } + frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } -pallet-staking = { version = "4.0.0-dev", default-features = false, features = [ - "runtime-benchmarks", -], path = "../../staking" } 
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } -rand = { version = "0.7.2", default-features = false } +pallet-staking = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", features = [ - "derive", -] } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } scale-info = "1.0" sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } -pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } -pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } pallet-balances = { version = "4.0.0-dev", path = "../../balances" } +pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } frame-election-provider-support = { version = "4.0.0-dev", path = "../../election-provider-support" } [features] default = ["std"] std = [ "sp-std/std", - "sp-session/std", "sp-runtime/std", - "frame-system/std", + "sp-session/std", "frame-benchmarking/std", "frame-support/std", - "pallet-staking/std", + "frame-system/std", "pallet-session/std", + "pallet-staking/std", ] diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 8b84145c1acfd..8ca713b1bbf61 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -24,13 +24,13 @@ mod mock; use sp_std::{prelude::*, vec}; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_support::{ codec::Decode, traits::{KeyOwnerProofSystem, OnInitialize}, }; use frame_system::RawOrigin; 
-use pallet_session::{historical::Module as Historical, Module as Session, *}; +use pallet_session::{historical::Module as Historical, Pallet as Session, *}; use pallet_staking::{ benchmarking::create_validator_with_nominators, testing_utils::create_validators, RewardDestination, @@ -39,7 +39,7 @@ use sp_runtime::traits::{One, StaticLookup}; const MAX_VALIDATORS: u32 = 1000; -pub struct Pallet(pallet_session::Module); +pub struct Pallet(pallet_session::Pallet); pub trait Config: pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config { @@ -47,7 +47,7 @@ pub trait Config: impl OnInitialize for Pallet { fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { - pallet_session::Module::::on_initialize(n) + pallet_session::Pallet::::on_initialize(n) } } @@ -115,6 +115,8 @@ benchmarks! { verify { assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); } /// Sets up the benchmark for checking a membership proof. 
It creates the given @@ -161,5 +163,3 @@ fn check_membership_proof_setup( (key, Historical::::prove(key).unwrap()) } - -impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index c685db2bb2524..4d3a1a2d8689d 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -182,6 +182,7 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; + type SortedListProvider = pallet_staking::UseNominatorsMap; type WeightInfo = (); } diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index c9b13e3c7f262..0801b2aca1701 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -26,7 +26,7 @@ //! These roots and proofs of inclusion can be generated at any time during the current session. //! Afterwards, the proofs can be fed to a consensus module when reporting misbehavior. 
-use super::{Module as SessionModule, SessionIndex}; +use super::{Pallet as SessionModule, SessionIndex}; use codec::{Decode, Encode}; use frame_support::{ decl_module, decl_storage, print, @@ -114,11 +114,11 @@ impl ValidatorSet for Module { type ValidatorIdOf = T::ValidatorIdOf; fn session_index() -> sp_staking::SessionIndex { - super::Module::::current_index() + super::Pallet::::current_index() } fn validators() -> Vec { - super::Module::::validators() + super::Pallet::::validators() } } @@ -366,11 +366,13 @@ pub(crate) mod tests { use crate::mock::{ force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, }; + + use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId}; + use frame_support::{ - traits::{KeyOwnerProofSystem, OnInitialize}, + traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, BasicExternalities, }; - use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId}; type Historical = Module; diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index 8583c2bb439be..b646ecc2764f7 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -30,15 +30,11 @@ use sp_runtime::{ KeyTypeId, }; use sp_session::MembershipProof; - -use super::{ - super::{Pallet as SessionModule, SessionIndex}, - Config, IdentificationTuple, ProvingTrie, -}; - -use super::shared; use sp_std::prelude::*; +use super::{shared, Config, IdentificationTuple, ProvingTrie}; +use crate::{Pallet as SessionModule, SessionIndex}; + /// A set of validators, which was used for a fixed session index. 
struct ValidatorSet { validator_set: Vec>, @@ -142,23 +138,24 @@ pub fn keep_newest(n_to_keep: usize) { #[cfg(test)] mod tests { - use super::{ - super::{onchain, Module}, - *, - }; - use crate::mock::{ - force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, + use super::*; + use crate::{ + historical::{onchain, Module}, + mock::{force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS}, }; + use codec::Encode; - use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; use sp_core::{ crypto::key_types::DUMMY, offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt, StorageKind}, }; - - use frame_support::BasicExternalities; use sp_runtime::testing::UintAuthorityId; + use frame_support::{ + traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, + BasicExternalities, + }; + type Historical = Module; pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/session/src/historical/onchain.rs b/frame/session/src/historical/onchain.rs index 514e343f4e0f6..c80817c28d723 100644 --- a/frame/session/src/historical/onchain.rs +++ b/frame/session/src/historical/onchain.rs @@ -19,15 +19,11 @@ use codec::Encode; use sp_runtime::traits::Convert; - -use super::{ - super::{Config as SessionConfig, Pallet as SessionModule, SessionIndex}, - Config as HistoricalConfig, -}; - -use super::shared; use sp_std::prelude::*; +use super::{shared, Config as HistoricalConfig}; +use crate::{Config as SessionConfig, Pallet as SessionModule, SessionIndex}; + /// Store the validator-set associated to the `session_index` to the off-chain database. /// /// Further processing is then done [`off-chain side`](super::offchain). diff --git a/frame/session/src/historical/shared.rs b/frame/session/src/historical/shared.rs index e801aa80eef4c..182e9ecacee19 100644 --- a/frame/session/src/historical/shared.rs +++ b/frame/session/src/historical/shared.rs @@ -18,8 +18,8 @@ //! 
Shared logic between on-chain and off-chain components used for slashing using an off-chain //! worker. -use super::SessionIndex; use codec::Encode; +use sp_staking::SessionIndex; use sp_std::prelude::*; pub(super) const PREFIX: &[u8] = b"session_historical"; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 3f5d853d4fa21..2742d302ce439 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Session Module +//! # Session Pallet //! -//! The Session module allows validators to manage their session keys, provides a function for +//! The Session pallet allows validators to manage their session keys, provides a function for //! changing the session length, and handles session rotation. //! //! - [`Config`] //! - [`Call`] -//! - [`Module`] +//! - [`Pallet`] //! //! ## Overview //! @@ -95,12 +95,12 @@ //! use pallet_session as session; //! //! fn validators() -> Vec<::ValidatorId> { -//! >::validators() +//! >::validators() //! } //! # fn main(){} //! ``` //! -//! ## Related Modules +//! ## Related Pallets //! //! 
- [Staking](../pallet_staking/index.html) @@ -114,22 +114,9 @@ mod mock; mod tests; pub mod weights; -use codec::Decode; -use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - dispatch::{self, DispatchError, DispatchResult}, - ensure, - traits::{ - EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, OneSessionHandler, - ValidatorRegistration, ValidatorSet, - }, - weights::Weight, - ConsensusEngineId, Parameter, -}; -use frame_system::ensure_signed; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero}, - KeyTypeId, Perbill, Permill, RuntimeAppPublic, + ConsensusEngineId, KeyTypeId, Perbill, Permill, RuntimeAppPublic, }; use sp_staking::SessionIndex; use sp_std::{ @@ -137,6 +124,20 @@ use sp_std::{ ops::{Rem, Sub}, prelude::*, }; + +use frame_support::{ + codec::{Decode, MaxEncodedLen}, + dispatch::{DispatchError, DispatchResult}, + ensure, + traits::{ + EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, OneSessionHandler, + StorageVersion, ValidatorRegistration, ValidatorSet, + }, + weights::Weight, + Parameter, +}; + +pub use pallet::*; pub use weights::WeightInfo; /// Decides whether the session should be ended. @@ -228,7 +229,7 @@ pub trait SessionManager { /// /// Even if the validator-set is the same as before, if any underlying economic conditions have /// changed (i.e. stake-weights), the new validator set must be returned. This is necessary for - /// consensus engines making use of the session module to issue a validator-set change so + /// consensus engines making use of the session pallet to issue a validator-set change so /// misbehavior can be provably associated with the new economic conditions as opposed to the /// old. The returned validator set, if any, will not be applied until `new_index`. `new_index` /// is strictly greater than from previous call. 
@@ -280,7 +281,7 @@ pub trait SessionHandler { fn on_genesis_session(validators: &[(ValidatorId, Ks)]); /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. + /// before initialization of your pallet. /// /// `changed` is true whenever any of the session keys or underlying economic /// identities or weightings behind those keys has changed. @@ -356,86 +357,83 @@ impl SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl ValidatorRegistration for Module { - fn is_registered(id: &T::ValidatorId) -> bool { - Self::load_keys(id).is_some() - } -} - -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From + Into<::Event>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - /// A stable ID for a validator. - type ValidatorId: Member + Parameter; + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); - /// A conversion from account ID to validator ID. - /// - /// Its cost must be at most one storage read. - type ValidatorIdOf: Convert>; - - /// Indicator for when to end the session. - type ShouldEndSession: ShouldEndSession; - - /// Something that can predict the next session rotation. This should typically come from the - /// same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort estimate. - /// It is helpful to implement [`EstimateNextNewSession`]. - type NextSessionRotation: EstimateNextSessionRotation; - - /// Handler for managing new session. - type SessionManager: SessionManager; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); - /// Handler when a session has changed. 
- type SessionHandler: SessionHandler; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From + IsType<::Event>; - /// The keys. - type Keys: OpaqueKeys + Member + Parameter + Default; + /// A stable ID for a validator. + type ValidatorId: Member + Parameter + MaybeSerializeDeserialize + MaxEncodedLen; - /// The fraction of validators set that is safe to be disabled. - /// - /// After the threshold is reached `disabled` method starts to return true, - /// which in combination with `pallet_staking` forces a new era. - type DisabledValidatorsThreshold: Get; + /// A conversion from account ID to validator ID. + /// + /// Its cost must be at most one storage read. + type ValidatorIdOf: Convert>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// Indicator for when to end the session. + type ShouldEndSession: ShouldEndSession; -decl_storage! { - trait Store for Module as Session { - /// The current set of validators. - Validators get(fn validators): Vec; + /// Something that can predict the next session rotation. This should typically come from + /// the same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort + /// estimate. It is helpful to implement [`EstimateNextNewSession`]. + type NextSessionRotation: EstimateNextSessionRotation; - /// Current index of the session. - CurrentIndex get(fn current_index): SessionIndex; + /// Handler for managing new session. + type SessionManager: SessionManager; - /// True if the underlying economic identities or weighting behind the validators - /// has changed in the queued validator set. - QueuedChanged: bool; + /// Handler when a session has changed. + type SessionHandler: SessionHandler; - /// The queued keys for the next session. When the next session begins, these keys - /// will be used to determine the validator's session keys. 
- QueuedKeys get(fn queued_keys): Vec<(T::ValidatorId, T::Keys)>; + /// The keys. + type Keys: OpaqueKeys + Member + Parameter + Default + MaybeSerializeDeserialize; - /// Indices of disabled validators. + /// The fraction of validators set that is safe to be disabled. /// - /// The set is cleared when `on_session_ending` returns a new set of identities. - DisabledValidators get(fn disabled_validators): Vec; + /// After the threshold is reached `disabled` method starts to return true, + /// which in combination with `pallet_staking` forces a new era. + type DisabledValidatorsThreshold: Get; - /// The next session keys for a validator. - NextKeys: map hasher(twox_64_concat) T::ValidatorId => Option; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub keys: Vec<(T::AccountId, T::ValidatorId, T::Keys)>, + } - /// The owner of a key. The key is the `KeyTypeId` + the encoded key. - KeyOwner: map hasher(twox_64_concat) (KeyTypeId, Vec) => Option; + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { keys: Default::default() } + } } - add_extra_genesis { - config(keys): Vec<(T::AccountId, T::ValidatorId, T::Keys)>; - build(|config: &GenesisConfig| { + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { if T::SessionHandler::KEY_TYPE_IDS.len() != T::Keys::key_ids().len() { panic!("Number of keys in session handler and session keys does not match"); } - T::SessionHandler::KEY_TYPE_IDS.iter().zip(T::Keys::key_ids()).enumerate() + T::SessionHandler::KEY_TYPE_IDS + .iter() + .zip(T::Keys::key_ids()) + .enumerate() .for_each(|(i, (sk, kk))| { if sk != kk { panic!( @@ -445,8 +443,8 @@ decl_storage! 
{ } }); - for (account, val, keys) in config.keys.iter().cloned() { - >::inner_set_keys(&val, keys) + for (account, val, keys) in self.keys.iter().cloned() { + >::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); if frame_system::Pallet::::inc_consumers(&account).is_err() { // This will leak a provider reference, however it only happens once (at @@ -457,25 +455,30 @@ decl_storage! { } } - let initial_validators_0 = T::SessionManager::new_session_genesis(0) - .unwrap_or_else(|| { - frame_support::print("No initial validator provided by `SessionManager`, use \ - session config keys to generate initial validator set."); - config.keys.iter().map(|x| x.1.clone()).collect() + let initial_validators_0 = + T::SessionManager::new_session_genesis(0).unwrap_or_else(|| { + frame_support::print( + "No initial validator provided by `SessionManager`, use \ + session config keys to generate initial validator set.", + ); + self.keys.iter().map(|x| x.1.clone()).collect() }); - assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!"); + assert!( + !initial_validators_0.is_empty(), + "Empty validator set for session 0 in genesis block!" + ); let initial_validators_1 = T::SessionManager::new_session_genesis(1) .unwrap_or_else(|| initial_validators_0.clone()); - assert!(!initial_validators_1.is_empty(), "Empty validator set for session 1 in genesis block!"); + assert!( + !initial_validators_1.is_empty(), + "Empty validator set for session 1 in genesis block!" + ); let queued_keys: Vec<_> = initial_validators_1 .iter() .cloned() - .map(|v| ( - v.clone(), - >::load_keys(&v).unwrap_or_default(), - )) + .map(|v| (v.clone(), >::load_keys(&v).unwrap_or_default())) .collect(); // Tell everyone about the genesis session keys @@ -485,21 +488,62 @@ decl_storage! { >::put(queued_keys); T::SessionManager::start_session(0); - }); + } } -} -decl_event!( + /// The current set of validators. 
+ #[pallet::storage] + #[pallet::getter(fn validators)] + pub type Validators = StorageValue<_, Vec, ValueQuery>; + + /// Current index of the session. + #[pallet::storage] + #[pallet::getter(fn current_index)] + pub type CurrentIndex = StorageValue<_, SessionIndex, ValueQuery>; + + /// True if the underlying economic identities or weighting behind the validators + /// has changed in the queued validator set. + #[pallet::storage] + pub type QueuedChanged = StorageValue<_, bool, ValueQuery>; + + /// The queued keys for the next session. When the next session begins, these keys + /// will be used to determine the validator's session keys. + #[pallet::storage] + #[pallet::getter(fn queued_keys)] + pub type QueuedKeys = StorageValue<_, Vec<(T::ValidatorId, T::Keys)>, ValueQuery>; + + /// Indices of disabled validators. + /// + /// The set is cleared when `on_session_ending` returns a new set of identities. + #[pallet::storage] + #[pallet::getter(fn disabled_validators)] + pub type DisabledValidators = StorageValue<_, Vec, ValueQuery>; + + /// The next session keys for a validator. + #[pallet::storage] + pub type NextKeys = + StorageMap<_, Twox64Concat, T::ValidatorId, T::Keys, OptionQuery>; + + /// The owner of a key. The key is the `KeyTypeId` + the encoded key. + #[pallet::storage] + pub type KeyOwner = + StorageMap<_, Twox64Concat, (KeyTypeId, Vec), T::ValidatorId, OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// New session has happened. Note that the argument is the \[session_index\], not the /// block number as the type might suggest. NewSession(SessionIndex), } -); -decl_error! { - /// Error for the session module. - pub enum Error for Module { + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + /// Error for the session pallet. + #[pallet::error] + pub enum Error { /// Invalid ownership proof. 
InvalidProof, /// No associated validator ID for account. @@ -511,14 +555,26 @@ decl_error! { /// Key setting account is not live, so it's impossible to associate keys. NoAccount, } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet { + /// Called when a block is initialized. Will rotate session if it is the last + /// block of the current session. + fn on_initialize(n: T::BlockNumber) -> Weight { + if T::ShouldEndSession::should_end_session(n) { + Self::rotate_session(); + T::BlockWeights::get().max_block + } else { + // NOTE: the non-database part of the weight for `should_end_session(n)` is + // included as weight for empty block, the database part is expected to be in + // cache. + 0 + } + } + } + #[pallet::call] + impl Pallet { /// Sets the session key(s) of the function caller to `keys`. /// Allows an account to set its session key prior to becoming a validator. /// This doesn't take effect until the next session. @@ -526,21 +582,19 @@ decl_module! { /// The dispatch origin of this function must be signed. /// /// # - /// - Complexity: `O(1)` - /// Actual cost depends on the number of length of `T::Keys::key_ids()` which is fixed. + /// - Complexity: `O(1)`. Actual cost depends on the number of length of + /// `T::Keys::key_ids()` which is fixed. 
/// - DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys` /// - DbWrites: `origin account`, `NextKeys` /// - DbReads per key id: `KeyOwner` /// - DbWrites per key id: `KeyOwner` /// # - #[weight = T::WeightInfo::set_keys()] - pub fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { + #[pallet::weight(T::WeightInfo::set_keys())] + pub fn set_keys(origin: OriginFor, keys: T::Keys, proof: Vec) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!(keys.ownership_proof_is_valid(&proof), Error::::InvalidProof); Self::do_set_keys(&who, keys)?; - Ok(()) } @@ -550,43 +604,30 @@ decl_module! { /// The dispatch origin of this function must be signed. /// /// # - /// - Complexity: `O(1)` in number of key types. - /// Actual cost depends on the number of length of `T::Keys::key_ids()` which is fixed. + /// - Complexity: `O(1)` in number of key types. Actual cost depends on the number of length + /// of `T::Keys::key_ids()` which is fixed. /// - DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account` /// - DbWrites: `NextKeys`, `origin account` /// - DbWrites per key id: `KeyOwner` /// # - #[weight = T::WeightInfo::purge_keys()] - pub fn purge_keys(origin) { + #[pallet::weight(T::WeightInfo::purge_keys())] + pub fn purge_keys(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; Self::do_purge_keys(&who)?; - } - - /// Called when a block is initialized. Will rotate session if it is the last - /// block of the current session. - fn on_initialize(n: T::BlockNumber) -> Weight { - if T::ShouldEndSession::should_end_session(n) { - Self::rotate_session(); - T::BlockWeights::get().max_block - } else { - // NOTE: the non-database part of the weight for `should_end_session(n)` is - // included as weight for empty block, the database part is expected to be in - // cache. - 0 - } + Ok(()) } } } -impl Module { +impl Pallet { /// Move on to next session. Register new validator set and session keys. 
Changes to the /// validator set have a session of delay to take effect. This allows for equivocation /// punishment after a fork. pub fn rotate_session() { - let session_index = CurrentIndex::get(); + let session_index = >::get(); log::trace!(target: "runtime::session", "rotating session {:?}", session_index); - let changed = QueuedChanged::get(); + let changed = >::get(); // Inform the session handlers that a session is going to end. T::SessionHandler::on_before_session_ending(); @@ -600,12 +641,12 @@ impl Module { if changed { // reset disabled validators - DisabledValidators::take(); + >::take(); } // Increment session index. let session_index = session_index + 1; - CurrentIndex::put(session_index); + >::put(session_index); T::SessionManager::start_session(session_index); @@ -655,7 +696,7 @@ impl Module { }; >::put(queued_amalgamated.clone()); - QueuedChanged::put(next_changed); + >::put(next_changed); // Record that this happened. Self::deposit_event(Event::NewSession(session_index)); @@ -669,7 +710,7 @@ impl Module { /// Returns `true` if this causes a `DisabledValidatorsThreshold` of validators /// to be already disabled. pub fn disable_index(i: usize) -> bool { - let (fire_event, threshold_reached) = DisabledValidators::mutate(|disabled| { + let (fire_event, threshold_reached) = >::mutate(|disabled| { let i = i as u32; if let Err(index) = disabled.binary_search(&i) { let count = >::decode_len().unwrap_or(0) as u32; @@ -688,12 +729,12 @@ impl Module { threshold_reached } - /// Disable the validator identified by `c`. (If using with the staking module, + /// Disable the validator identified by `c`. (If using with the staking pallet, /// this would be their *stash* account.) /// /// Returns `Ok(true)` if more than `DisabledValidatorsThreshold` validators in current /// session is already disabled. - /// If used with the staking module it allows to force a new era in such case. + /// If used with the staking pallet it allows to force a new era in such case. 
pub fn disable(c: &T::ValidatorId) -> sp_std::result::Result { Self::validators() .iter() @@ -711,7 +752,7 @@ impl Module { /// /// Care should be taken that the raw versions of the /// added keys are unique for every `ValidatorId, KeyTypeId` combination. - /// This is an invariant that the session module typically maintains internally. + /// This is an invariant that the session pallet typically maintains internally. /// /// As the actual values of the keys are typically not known at runtime upgrade, /// it's recommended to initialize the keys to a (unique) dummy value with the expectation @@ -756,7 +797,7 @@ impl Module { /// /// This ensures that the reference counter in system is incremented appropriately and as such /// must accept an account ID, rather than a validator ID. - fn do_set_keys(account: &T::AccountId, keys: T::Keys) -> dispatch::DispatchResult { + fn do_set_keys(account: &T::AccountId, keys: T::Keys) -> DispatchResult { let who = T::ValidatorIdOf::convert(account.clone()) .ok_or(Error::::NoAssociatedValidatorId)?; @@ -850,16 +891,40 @@ impl Module { } } -impl ValidatorSet for Module { +impl ValidatorRegistration for Pallet { + fn is_registered(id: &T::ValidatorId) -> bool { + Self::load_keys(id).is_some() + } +} + +impl ValidatorSet for Pallet { type ValidatorId = T::ValidatorId; type ValidatorIdOf = T::ValidatorIdOf; fn session_index() -> sp_staking::SessionIndex { - Module::::current_index() + Pallet::::current_index() } fn validators() -> Vec { - Module::::validators() + Pallet::::validators() + } +} + +impl EstimateNextNewSession for Pallet { + fn average_session_length() -> T::BlockNumber { + T::NextSessionRotation::average_session_length() + } + + /// This session pallet always calls new_session and next_session at the same time, hence we + /// do a simple proxy and pass the function to next rotation. 
+ fn estimate_next_new_session(now: T::BlockNumber) -> (Option, Weight) { + T::NextSessionRotation::estimate_next_session_rotation(now) + } +} + +impl frame_support::traits::DisabledValidators for Pallet { + fn is_disabled(index: u32) -> bool { + >::disabled_validators().binary_search(&index).is_ok() } } @@ -877,25 +942,7 @@ impl> FindAuthor { let i = Inner::find_author(digests)?; - let validators = >::validators(); + let validators = >::validators(); validators.get(i as usize).map(|k| k.clone()) } } - -impl EstimateNextNewSession for Module { - fn average_session_length() -> T::BlockNumber { - T::NextSessionRotation::average_session_length() - } - - /// This session module always calls new_session and next_session at the same time, hence we - /// do a simple proxy and pass the function to next rotation. - fn estimate_next_new_session(now: T::BlockNumber) -> (Option, Weight) { - T::NextSessionRotation::estimate_next_session_rotation(now) - } -} - -impl frame_support::traits::DisabledValidators for Module { - fn is_disabled(index: u32) -> bool { - >::disabled_validators().binary_search(&index).is_ok() - } -} diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 449acaff5305d..c6b5f64448114 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -21,7 +21,9 @@ use super::*; use crate as pallet_session; #[cfg(feature = "historical")] use crate::historical as pallet_session_historical; -use frame_support::{parameter_types, BasicExternalities}; + +use std::cell::RefCell; + use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ impl_opaque_keys, @@ -30,7 +32,8 @@ use sp_runtime::{ Perbill, }; use sp_staking::SessionIndex; -use std::cell::RefCell; + +use frame_support::{parameter_types, traits::GenesisBuild, BasicExternalities}; impl_opaque_keys! 
{ pub struct MockSessionKeys { diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index 23e1c6a993427..47152042d204f 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -18,17 +18,19 @@ // Tests for the Session Pallet use super::*; -use codec::Decode; -use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; -use mock::{ +use crate::mock::{ authorities, before_session_end_called, force_new_session, new_test_ext, reset_before_session_end_called, session_changed, set_next_validators, set_session_length, Origin, PreUpgradeMockSessionKeys, Session, System, Test, SESSION_CHANGED, TEST_SESSION_CHANGED, }; + +use codec::Decode; use sp_core::crypto::key_types::DUMMY; use sp_runtime::testing::UintAuthorityId; +use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; + fn initialize_block(block: u64) { SESSION_CHANGED.with(|l| *l.borrow_mut() = false); System::set_block_number(block); diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index c6d63eed20ac0..83b1c4203722b 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Society Module +//! # Society Pallet //! //! - [`Config`] //! - [`Call`] //! //! ## Overview //! -//! The Society module is an economic game which incentivizes users to participate +//! The Society pallet is an economic game which incentivizes users to participate //! and maintain a membership society. //! //! ### User Types @@ -77,7 +77,7 @@ //! #### Society Treasury //! //! The membership society is independently funded by a treasury managed by this -//! module. Some subset of this treasury is placed in a Society Pot, which is used +//! pallet. Some subset of this treasury is placed in a Society Pot, which is used //! to determine the number of accepted bids. //! //! #### Rate of Growth @@ -132,7 +132,7 @@ //! the society. 
A vouching bid can additionally request some portion of that reward as a tip //! to the voucher for vouching for the prospective candidate. //! -//! Every rotation period, Bids are ordered by reward amount, and the module +//! Every rotation period, Bids are ordered by reward amount, and the pallet //! selects as many bids the Society Pot can support for that period. //! //! These selected bids become candidates and move on to the Candidate phase. @@ -251,19 +251,15 @@ mod mock; #[cfg(test)] mod tests; -use codec::{Decode, Encode}; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - dispatch::DispatchResult, - ensure, + pallet_prelude::*, traits::{ BalanceStatus, ChangeMembers, Currency, EnsureOrigin, ExistenceRequirement::AllowDeath, - Get, Imbalance, OnUnbalanced, Randomness, ReservableCurrency, + Imbalance, OnUnbalanced, Randomness, ReservableCurrency, }, - weights::Weight, PalletId, }; -use frame_system::{self as system, ensure_root, ensure_signed}; +use frame_system::pallet_prelude::*; use rand_chacha::{ rand_core::{RngCore, SeedableRng}, ChaChaRng, @@ -278,62 +274,14 @@ use sp_runtime::{ }; use sp_std::prelude::*; +pub use pallet::*; + type BalanceOf = - <>::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency< + <>::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; -/// The module's configuration trait. -pub trait Config: system::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The societies's module id - type PalletId: Get; - - /// The currency type used for bidding. - type Currency: ReservableCurrency; - - /// Something that provides randomness in the runtime. - type Randomness: Randomness; - - /// The minimum amount of a deposit required for a bid to be made. 
- type CandidateDeposit: Get>; - - /// The amount of the unpaid reward that gets deducted in the case that either a skeptic - /// doesn't vote or someone votes in the wrong way. - type WrongSideDeduction: Get>; - - /// The number of times a member may vote the wrong way (or not at all, when they are a skeptic) - /// before they become suspended. - type MaxStrikes: Get; - - /// The amount of incentive paid within each period. Doesn't include VoterTip. - type PeriodSpend: Get>; - - /// The receiver of the signal for when the members have changed. - type MembershipChanged: ChangeMembers; - - /// The number of blocks between candidate/membership rotation periods. - type RotationPeriod: Get; - - /// The maximum duration of the payout lock. - type MaxLockDuration: Get; - - /// The origin that is allowed to call `found`. - type FounderSetOrigin: EnsureOrigin; - - /// The origin that is allowed to make suspension judgements. - type SuspensionJudgementOrigin: EnsureOrigin; - - /// The number of blocks between membership challenges. - type ChallengePeriod: Get; - - /// The maximum number of candidates that we accept per round. - type MaxCandidateIntake: Get; -} - /// A vote by a member on a candidate application. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum Vote { @@ -417,108 +365,320 @@ impl BidKind { } } -// This module's storage items. -decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Society { - /// The first member. - pub Founder get(fn founder) build(|config: &GenesisConfig| config.members.first().cloned()): - Option; +#[frame_support::pallet] +pub mod pallet { + use super::*; - /// A hash of the rules of this society concerning membership. Can only be set once and - /// only by the founder. - pub Rules get(fn rules): Option; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// The current set of candidates; bidders that are attempting to become members. 
- pub Candidates get(fn candidates): Vec>>; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// The set of suspended candidates. - pub SuspendedCandidates get(fn suspended_candidate): - map hasher(twox_64_concat) T::AccountId - => Option<(BalanceOf, BidKind>)>; + /// The societies's pallet id + #[pallet::constant] + type PalletId: Get; - /// Amount of our account balance that is specifically for the next round's bid(s). - pub Pot get(fn pot) config(): BalanceOf; + /// The currency type used for bidding. + type Currency: ReservableCurrency; - /// The most primary from the most recently approved members. - pub Head get(fn head) build(|config: &GenesisConfig| config.members.first().cloned()): - Option; + /// Something that provides randomness in the runtime. + type Randomness: Randomness; - /// The current set of members, ordered. - pub Members get(fn members) build(|config: &GenesisConfig| { - let mut m = config.members.clone(); - m.sort(); - m - }): Vec; + /// The minimum amount of a deposit required for a bid to be made. + #[pallet::constant] + type CandidateDeposit: Get>; + + /// The amount of the unpaid reward that gets deducted in the case that either a skeptic + /// doesn't vote or someone votes in the wrong way. + #[pallet::constant] + type WrongSideDeduction: Get>; - /// The set of suspended members. - pub SuspendedMembers get(fn suspended_member): map hasher(twox_64_concat) T::AccountId => bool; + /// The number of times a member may vote the wrong way (or not at all, when they are a + /// skeptic) before they become suspended. + #[pallet::constant] + type MaxStrikes: Get; - /// The current bids, stored ordered by the value of the bid. - Bids: Vec>>; + /// The amount of incentive paid within each period. Doesn't include VoterTip. 
+ #[pallet::constant] + type PeriodSpend: Get>; - /// Members currently vouching or banned from vouching again - Vouching get(fn vouching): map hasher(twox_64_concat) T::AccountId => Option; + /// The receiver of the signal for when the members have changed. + type MembershipChanged: ChangeMembers; - /// Pending payouts; ordered by block number, with the amount that should be paid out. - Payouts: map hasher(twox_64_concat) T::AccountId => Vec<(T::BlockNumber, BalanceOf)>; + /// The number of blocks between candidate/membership rotation periods. + #[pallet::constant] + type RotationPeriod: Get; - /// The ongoing number of losing votes cast by the member. - Strikes: map hasher(twox_64_concat) T::AccountId => StrikeCount; + /// The maximum duration of the payout lock. + #[pallet::constant] + type MaxLockDuration: Get; - /// Double map from Candidate -> Voter -> (Maybe) Vote. - Votes: double_map - hasher(twox_64_concat) T::AccountId, - hasher(twox_64_concat) T::AccountId - => Option; + /// The origin that is allowed to call `found`. + type FounderSetOrigin: EnsureOrigin; - /// The defending member currently being challenged. - Defender get(fn defender): Option; + /// The origin that is allowed to make suspension judgements. + type SuspensionJudgementOrigin: EnsureOrigin; - /// Votes for the defender. - DefenderVotes: map hasher(twox_64_concat) T::AccountId => Option; + /// The number of blocks between membership challenges. + #[pallet::constant] + type ChallengePeriod: Get; - /// The max number of members for the society at one time. - MaxMembers get(fn max_members) config(): u32; + /// The maximum number of candidates that we accept per round. + #[pallet::constant] + type MaxCandidateIntake: Get; + } + + #[pallet::error] + pub enum Error { + /// An incorrect position was provided. + BadPosition, + /// User is not a member. + NotMember, + /// User is already a member. + AlreadyMember, + /// User is suspended. + Suspended, + /// User is not suspended. 
+ NotSuspended, + /// Nothing to payout. + NoPayout, + /// Society already founded. + AlreadyFounded, + /// Not enough in pot to accept candidate. + InsufficientPot, + /// Member is already vouching or banned from vouching again. + AlreadyVouching, + /// Member is not vouching. + NotVouching, + /// Cannot remove the head of the chain. + Head, + /// Cannot remove the founder. + Founder, + /// User has already made a bid. + AlreadyBid, + /// User is already a candidate. + AlreadyCandidate, + /// User is not a candidate. + NotCandidate, + /// Too many members in the society. + MaxMembers, + /// The caller is not the founder. + NotFounder, + /// The caller is not the head. + NotHead, } - add_extra_genesis { - config(members): Vec; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// The society is founded by the given identity. \[founder\] + Founded(T::AccountId), + /// A membership bid just happened. The given account is the candidate's ID and their offer + /// is the second. \[candidate_id, offer\] + Bid(T::AccountId, BalanceOf), + /// A membership bid just happened by vouching. The given account is the candidate's ID and + /// their offer is the second. The vouching party is the third. \[candidate_id, offer, + /// vouching\] + Vouch(T::AccountId, BalanceOf, T::AccountId), + /// A \[candidate\] was dropped (due to an excess of bids in the system). + AutoUnbid(T::AccountId), + /// A \[candidate\] was dropped (by their request). + Unbid(T::AccountId), + /// A \[candidate\] was dropped (by request of who vouched for them). + Unvouch(T::AccountId), + /// A group of candidates have been inducted. The batch's primary is the first value, the + /// batch in full is the second. \[primary, candidates\] + Inducted(T::AccountId, Vec), + /// A suspended member has been judged. 
\[who, judged\] + SuspendedMemberJudgement(T::AccountId, bool), + /// A \[candidate\] has been suspended + CandidateSuspended(T::AccountId), + /// A \[member\] has been suspended + MemberSuspended(T::AccountId), + /// A \[member\] has been challenged + Challenged(T::AccountId), + /// A vote has been placed \[candidate, voter, vote\] + Vote(T::AccountId, T::AccountId, bool), + /// A vote has been placed for a defending member \[voter, vote\] + DefenderVote(T::AccountId, bool), + /// A new \[max\] member count has been set + NewMaxMembers(u32), + /// Society is unfounded. \[founder\] + Unfounded(T::AccountId), + /// Some funds were deposited into the society account. \[value\] + Deposit(BalanceOf), } -} -// The module's dispatchable functions. -decl_module! { - /// The module declaration. - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { - type Error = Error; - /// The minimum amount of a deposit required for a bid to be made. - const CandidateDeposit: BalanceOf = T::CandidateDeposit::get(); + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + /// The first member. + #[pallet::storage] + #[pallet::getter(fn founder)] + pub type Founder, I: 'static = ()> = StorageValue<_, T::AccountId>; + + /// A hash of the rules of this society concerning membership. Can only be set once and + /// only by the founder. + #[pallet::storage] + #[pallet::getter(fn rules)] + pub type Rules, I: 'static = ()> = StorageValue<_, T::Hash>; + + /// The current set of candidates; bidders that are attempting to become members. + #[pallet::storage] + #[pallet::getter(fn candidates)] + pub type Candidates, I: 'static = ()> = + StorageValue<_, Vec>>, ValueQuery>; + + /// The set of suspended candidates. 
+ #[pallet::storage] + #[pallet::getter(fn suspended_candidate)] + pub type SuspendedCandidates, I: 'static = ()> = StorageMap< + _, + Twox64Concat, + T::AccountId, + (BalanceOf, BidKind>), + >; + + /// Amount of our account balance that is specifically for the next round's bid(s). + #[pallet::storage] + #[pallet::getter(fn pot)] + pub type Pot, I: 'static = ()> = StorageValue<_, BalanceOf, ValueQuery>; + + /// The most primary from the most recently approved members. + #[pallet::storage] + #[pallet::getter(fn head)] + pub type Head, I: 'static = ()> = StorageValue<_, T::AccountId>; + + /// The current set of members, ordered. + #[pallet::storage] + #[pallet::getter(fn members)] + pub type Members, I: 'static = ()> = + StorageValue<_, Vec, ValueQuery>; + + /// The set of suspended members. + #[pallet::storage] + #[pallet::getter(fn suspended_member)] + pub type SuspendedMembers, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, bool, ValueQuery>; + + /// The current bids, stored ordered by the value of the bid. + #[pallet::storage] + pub(super) type Bids, I: 'static = ()> = + StorageValue<_, Vec>>, ValueQuery>; + + /// Members currently vouching or banned from vouching again + #[pallet::storage] + #[pallet::getter(fn vouching)] + pub(super) type Vouching, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, VouchingStatus>; + + /// Pending payouts; ordered by block number, with the amount that should be paid out. + #[pallet::storage] + pub(super) type Payouts, I: 'static = ()> = StorageMap< + _, + Twox64Concat, + T::AccountId, + Vec<(T::BlockNumber, BalanceOf)>, + ValueQuery, + >; + + /// The ongoing number of losing votes cast by the member. + #[pallet::storage] + pub(super) type Strikes, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, StrikeCount, ValueQuery>; + + /// Double map from Candidate -> Voter -> (Maybe) Vote. 
+ #[pallet::storage] + pub(super) type Votes, I: 'static = ()> = + StorageDoubleMap<_, Twox64Concat, T::AccountId, Twox64Concat, T::AccountId, Vote>; + + /// The defending member currently being challenged. + #[pallet::storage] + #[pallet::getter(fn defender)] + pub(super) type Defender, I: 'static = ()> = StorageValue<_, T::AccountId>; + + /// Votes for the defender. + #[pallet::storage] + pub(super) type DefenderVotes, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, Vote>; + + /// The max number of members for the society at one time. + #[pallet::storage] + #[pallet::getter(fn max_members)] + pub(super) type MaxMembers, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + fn on_initialize(n: T::BlockNumber) -> Weight { + let mut members = vec![]; - /// The amount of the unpaid reward that gets deducted in the case that either a skeptic - /// doesn't vote or someone votes in the wrong way. - const WrongSideDeduction: BalanceOf = T::WrongSideDeduction::get(); + let mut weight = 0; + let weights = T::BlockWeights::get(); - /// The number of times a member may vote the wrong way (or not at all, when they are a skeptic) - /// before they become suspended. - const MaxStrikes: u32 = T::MaxStrikes::get(); + // Run a candidate/membership rotation + if (n % T::RotationPeriod::get()).is_zero() { + members = >::get(); + Self::rotate_period(&mut members); - /// The amount of incentive paid within each period. Doesn't include VoterTip. - const PeriodSpend: BalanceOf = T::PeriodSpend::get(); + weight += weights.max_block / 20; + } - /// The number of blocks between candidate/membership rotation periods. - const RotationPeriod: T::BlockNumber = T::RotationPeriod::get(); + // Run a challenge rotation + if (n % T::ChallengePeriod::get()).is_zero() { + // Only read members if not already read. 
+ if members.is_empty() { + members = >::get(); + } + Self::rotate_challenge(&mut members); - /// The number of blocks between membership challenges. - const ChallengePeriod: T::BlockNumber = T::ChallengePeriod::get(); + weight += weights.max_block / 20; + } - /// The societies's module id - const PalletId: PalletId = T::PalletId::get(); + weight + } + } + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + pub pot: BalanceOf, + pub members: Vec, + pub max_members: u32, + } - /// Maximum candidate intake per round. - const MaxCandidateIntake: u32 = T::MaxCandidateIntake::get(); + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { + pot: Default::default(), + members: Default::default(), + max_members: Default::default(), + } + } + } - // Used for handling module events. - fn deposit_event() = default; + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + Pot::::put(self.pot); + MaxMembers::::put(self.max_members); + let first_member = self.members.first(); + if let Some(member) = first_member { + Founder::::put(member.clone()); + Head::::put(member.clone()); + }; + let mut m = self.members.clone(); + m.sort(); + Members::::put(m); + } + } + #[pallet::call] + impl, I: 'static> Pallet { /// A user outside of the society can make a bid for entry. /// /// Payment: `CandidateDeposit` will be reserved for making a bid. It is returned @@ -538,12 +698,13 @@ decl_module! { /// - One storage read to retrieve all current candidates. O(C) /// - One storage read to retrieve all members. O(M) /// - Storage Writes: - /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization w/ read) + /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization + /// w/ read) /// - Up to one storage removal if bid.len() > MAX_BID_COUNT. 
O(1) /// - Notable Computation: /// - O(B + C + log M) search to check user is not already a part of society. /// - O(log B) search to insert the new bid sorted. - /// - External Module Operations: + /// - External Pallet Operations: /// - One balance reserve operation. O(X) /// - Up to one balance unreserve operation if bids.len() > MAX_BID_COUNT. /// - Events: @@ -552,8 +713,8 @@ decl_module! { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::BlockWeights::get().max_block / 10] - pub fn bid(origin, value: BalanceOf) -> DispatchResult { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn bid(origin: OriginFor, value: BalanceOf) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!>::contains_key(&who), Error::::Suspended); ensure!(!>::contains_key(&who), Error::::Suspended); @@ -562,13 +723,13 @@ decl_module! { let candidates = >::get(); ensure!(!Self::is_candidate(&candidates, &who), Error::::AlreadyCandidate); let members = >::get(); - ensure!(!Self::is_member(&members ,&who), Error::::AlreadyMember); + ensure!(!Self::is_member(&members, &who), Error::::AlreadyMember); let deposit = T::CandidateDeposit::get(); T::Currency::reserve(&who, deposit)?; Self::put_bid(bids, &who, value.clone(), BidKind::Deposit(deposit)); - Self::deposit_event(RawEvent::Bid(who, value)); + Self::deposit_event(Event::::Bid(who, value)); Ok(()) } @@ -591,12 +752,12 @@ decl_module! { /// /// Total Complexity: O(B + X) /// # - #[weight = T::BlockWeights::get().max_block / 10] - pub fn unbid(origin, pos: u32) -> DispatchResult { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn unbid(origin: OriginFor, pos: u32) -> DispatchResult { let who = ensure_signed(origin)?; let pos = pos as usize; - >::mutate(|b| + >::mutate(|b| { if pos < b.len() && b[pos].who == who { // Either unreserve the deposit or free up the vouching member. 
// In neither case can we do much if the action isn't completable, but there's @@ -605,17 +766,17 @@ decl_module! { BidKind::Deposit(deposit) => { let err_amount = T::Currency::unreserve(&who, deposit); debug_assert!(err_amount.is_zero()); - } + }, BidKind::Vouch(voucher, _) => { >::remove(&voucher); - } + }, } - Self::deposit_event(RawEvent::Unbid(who)); + Self::deposit_event(Event::::Unbid(who)); Ok(()) } else { Err(Error::::BadPosition)? } - ) + }) } /// As a member, vouch for someone to join society by placing a bid on their behalf. @@ -647,13 +808,14 @@ decl_module! { /// - One storage read to retrieve all current candidates. O(C) /// - Storage Writes: /// - One storage write to insert vouching status to the member. O(1) - /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization w/ read) + /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization + /// w/ read) /// - Up to one storage removal if bid.len() > MAX_BID_COUNT. O(1) /// - Notable Computation: /// - O(log M) search to check sender is a member. /// - O(B + C + log M) search to check user is not already a part of society. /// - O(log B) search to insert the new bid sorted. - /// - External Module Operations: + /// - External Pallet Operations: /// - One balance reserve operation. O(X) /// - Up to one balance unreserve operation if bids.len() > MAX_BID_COUNT. /// - Events: @@ -662,8 +824,13 @@ decl_module! { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::BlockWeights::get().max_block / 10] - pub fn vouch(origin, who: T::AccountId, value: BalanceOf, tip: BalanceOf) -> DispatchResult { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn vouch( + origin: OriginFor, + who: T::AccountId, + value: BalanceOf, + tip: BalanceOf, + ) -> DispatchResult { let voucher = ensure_signed(origin)?; // Check user is not suspended. 
ensure!(!>::contains_key(&who), Error::::Suspended); @@ -682,7 +849,7 @@ decl_module! { >::insert(&voucher, VouchingStatus::Vouching); Self::put_bid(bids, &who, value.clone(), BidKind::Vouch(voucher.clone(), tip)); - Self::deposit_event(RawEvent::Vouch(who, value, voucher)); + Self::deposit_event(Event::::Vouch(who, value, voucher)); Ok(()) } @@ -703,23 +870,26 @@ decl_module! { /// /// Total Complexity: O(B) /// # - #[weight = T::BlockWeights::get().max_block / 10] - pub fn unvouch(origin, pos: u32) -> DispatchResult { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn unvouch(origin: OriginFor, pos: u32) -> DispatchResult { let voucher = ensure_signed(origin)?; - ensure!(Self::vouching(&voucher) == Some(VouchingStatus::Vouching), Error::::NotVouching); + ensure!( + Self::vouching(&voucher) == Some(VouchingStatus::Vouching), + Error::::NotVouching + ); let pos = pos as usize; - >::mutate(|b| + >::mutate(|b| { if pos < b.len() { b[pos].kind.check_voucher(&voucher)?; >::remove(&voucher); let who = b.remove(pos).who; - Self::deposit_event(RawEvent::Unvouch(who)); + Self::deposit_event(Event::::Unvouch(who)); Ok(()) } else { Err(Error::::BadPosition)? } - ) + }) } /// As a member, vote on a candidate. @@ -728,8 +898,8 @@ decl_module! { /// /// Parameters: /// - `candidate`: The candidate that the member would like to bid on. - /// - `approve`: A boolean which says if the candidate should be - /// approved (`true`) or rejected (`false`). + /// - `approve`: A boolean which says if the candidate should be approved (`true`) or + /// rejected (`false`). /// /// # /// Key: C (len of candidates), M (len of members) @@ -741,8 +911,12 @@ decl_module! 
{ /// /// Total Complexity: O(M + logM + C) /// # - #[weight = T::BlockWeights::get().max_block / 10] - pub fn vote(origin, candidate: ::Source, approve: bool) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn vote( + origin: OriginFor, + candidate: ::Source, + approve: bool, + ) -> DispatchResult { let voter = ensure_signed(origin)?; let candidate = T::Lookup::lookup(candidate)?; let candidates = >::get(); @@ -753,7 +927,8 @@ decl_module! { let vote = if approve { Vote::Approve } else { Vote::Reject }; >::insert(&candidate, &voter, vote); - Self::deposit_event(RawEvent::Vote(candidate, voter, approve)); + Self::deposit_event(Event::::Vote(candidate, voter, approve)); + Ok(()) } /// As a member, vote on the defender. @@ -772,8 +947,8 @@ decl_module! { /// /// Total Complexity: O(M + logM) /// # - #[weight = T::BlockWeights::get().max_block / 10] - pub fn defender_vote(origin, approve: bool) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn defender_vote(origin: OriginFor, approve: bool) -> DispatchResult { let voter = ensure_signed(origin)?; let members = >::get(); ensure!(Self::is_member(&members, &voter), Error::::NotMember); @@ -781,12 +956,14 @@ decl_module! { let vote = if approve { Vote::Approve } else { Vote::Reject }; >::insert(&voter, vote); - Self::deposit_event(RawEvent::DefenderVote(voter, approve)); + Self::deposit_event(Event::::DefenderVote(voter, approve)); + Ok(()) } /// Transfer the first matured payout for the sender and remove it from the records. /// - /// NOTE: This extrinsic needs to be called multiple times to claim multiple matured payouts. + /// NOTE: This extrinsic needs to be called multiple times to claim multiple matured + /// payouts. /// /// Payment: The member will receive a payment equal to their first matured /// payout to their free balance. @@ -804,8 +981,8 @@ decl_module! 
{ /// /// Total Complexity: O(M + logM + P + X) /// # - #[weight = T::BlockWeights::get().max_block / 10] - pub fn payout(origin) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn payout(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let members = >::get(); @@ -813,7 +990,7 @@ decl_module! { let mut payouts = >::get(&who); if let Some((when, amount)) = payouts.first() { - if when <= &>::block_number() { + if when <= &>::block_number() { T::Currency::transfer(&Self::payouts(), &who, *amount, AllowDeath)?; payouts.remove(0); if payouts.is_empty() { @@ -830,7 +1007,7 @@ decl_module! { /// Found the society. /// /// This is done as a discrete action in order to allow for the - /// module to be included into a running chain and can only be done once. + /// pallet to be included into a running chain and can only be done once. /// /// The dispatch origin for this call must be from the _FounderSetOrigin_. /// @@ -846,18 +1023,24 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::BlockWeights::get().max_block / 10] - fn found(origin, founder: T::AccountId, max_members: u32, rules: Vec) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn found( + origin: OriginFor, + founder: T::AccountId, + max_members: u32, + rules: Vec, + ) -> DispatchResult { T::FounderSetOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::AlreadyFounded); ensure!(max_members > 1, Error::::MaxMembers); // This should never fail in the context of this function... - >::put(max_members); + >::put(max_members); Self::add_member(&founder)?; >::put(&founder); >::put(&founder); Rules::::put(T::Hashing::hash(&rules)); - Self::deposit_event(RawEvent::Founded(founder)); + Self::deposit_event(Event::::Founded(founder)); + Ok(()) } /// Annul the founding of the society. @@ -873,8 +1056,8 @@ decl_module! 
{ /// /// Total Complexity: O(1) /// # - #[weight = T::BlockWeights::get().max_block / 10] - fn unfound(origin) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn unfound(origin: OriginFor) -> DispatchResult { let founder = ensure_signed(origin)?; ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); ensure!(Head::::get() == Some(founder.clone()), Error::::NotHead); @@ -885,7 +1068,8 @@ decl_module! { Rules::::kill(); Candidates::::kill(); SuspendedCandidates::::remove_all(None); - Self::deposit_event(RawEvent::Unfounded(founder)); + Self::deposit_event(Event::::Unfounded(founder)); + Ok(()) } /// Allow suspension judgement origin to make judgement on a suspended member. @@ -900,13 +1084,14 @@ decl_module! { /// /// Parameters: /// - `who` - The suspended member to be judged. - /// - `forgive` - A boolean representing whether the suspension judgement origin - /// forgives (`true`) or rejects (`false`) a suspended member. + /// - `forgive` - A boolean representing whether the suspension judgement origin forgives + /// (`true`) or rejects (`false`) a suspended member. /// /// # /// Key: B (len of bids), M (len of members) /// - One storage read to check `who` is a suspended member. O(1) - /// - Up to one storage write O(M) with O(log M) binary search to add a member back to society. + /// - Up to one storage write O(M) with O(log M) binary search to add a member back to + /// society. /// - Up to 3 storage removals O(1) to clean up a removed member. /// - Up to one storage write O(B) with O(B) search to remove vouched bid from bids. /// - Up to one additional event if unvouch takes place. @@ -915,8 +1100,12 @@ decl_module! 
{ /// /// Total Complexity: O(M + logM + B) /// # - #[weight = T::BlockWeights::get().max_block / 10] - fn judge_suspended_member(origin, who: T::AccountId, forgive: bool) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn judge_suspended_member( + origin: OriginFor, + who: T::AccountId, + forgive: bool, + ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; ensure!(>::contains_key(&who), Error::::NotSuspended); @@ -936,14 +1125,15 @@ decl_module! { if let Some(pos) = bids.iter().position(|b| b.kind.check_voucher(&who).is_ok()) { // Remove the bid, and emit an event let vouched = bids.remove(pos).who; - Self::deposit_event(RawEvent::Unvouch(vouched)); + Self::deposit_event(Event::::Unvouch(vouched)); } ); } } >::remove(&who); - Self::deposit_event(RawEvent::SuspendedMemberJudgement(who, forgive)); + Self::deposit_event(Event::::SuspendedMemberJudgement(who, forgive)); + Ok(()) } /// Allow suspended judgement origin to make judgement on a suspended candidate. @@ -986,8 +1176,12 @@ decl_module! { /// /// Total Complexity: O(M + logM + B + X) /// # - #[weight = T::BlockWeights::get().max_block / 10] - fn judge_suspended_candidate(origin, who: T::AccountId, judgement: Judgement) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn judge_suspended_candidate( + origin: OriginFor, + who: T::AccountId, + judgement: Judgement, + ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; if let Some((value, kind)) = >::get(&who) { match judgement { @@ -1001,29 +1195,35 @@ decl_module! 
{ // Reduce next pot by payout >::put(pot - value); // Add payout for new candidate - let maturity = >::block_number() - + Self::lock_duration(Self::members().len() as u32); + let maturity = >::block_number() + + Self::lock_duration(Self::members().len() as u32); Self::pay_accepted_candidate(&who, value, kind, maturity); - } + }, Judgement::Reject => { // Founder has rejected this candidate match kind { BidKind::Deposit(deposit) => { // Slash deposit and move it to the society account - let res = T::Currency::repatriate_reserved(&who, &Self::account_id(), deposit, BalanceStatus::Free); + let res = T::Currency::repatriate_reserved( + &who, + &Self::account_id(), + deposit, + BalanceStatus::Free, + ); debug_assert!(res.is_ok()); - } + }, BidKind::Vouch(voucher, _) => { // Ban the voucher from vouching again >::insert(&voucher, VouchingStatus::Banned); - } + }, } - } + }, Judgement::Rebid => { - // Founder has taken no judgement, and candidate is placed back into the pool. + // Founder has taken no judgement, and candidate is placed back into the + // pool. let bids = >::get(); Self::put_bid(bids, &who, value, kind); - } + }, } // Remove suspended candidate @@ -1031,6 +1231,7 @@ decl_module! { } else { Err(Error::::NotSuspended)? } + Ok(()) } /// Allows root origin to change the maximum number of members in society. @@ -1047,137 +1248,24 @@ decl_module! 
{ /// /// Total Complexity: O(1) /// # - #[weight = T::BlockWeights::get().max_block / 10] - fn set_max_members(origin, max: u32) { + #[pallet::weight(T::BlockWeights::get().max_block / 10)] + pub fn set_max_members(origin: OriginFor, max: u32) -> DispatchResult { ensure_root(origin)?; ensure!(max > 1, Error::::MaxMembers); - MaxMembers::::put(max); - Self::deposit_event(RawEvent::NewMaxMembers(max)); - } - - fn on_initialize(n: T::BlockNumber) -> Weight { - let mut members = vec![]; - - let mut weight = 0; - let weights = T::BlockWeights::get(); - - // Run a candidate/membership rotation - if (n % T::RotationPeriod::get()).is_zero() { - members = >::get(); - Self::rotate_period(&mut members); - - weight += weights.max_block / 20; - } - - // Run a challenge rotation - if (n % T::ChallengePeriod::get()).is_zero() { - // Only read members if not already read. - if members.is_empty() { - members = >::get(); - } - Self::rotate_challenge(&mut members); - - weight += weights.max_block / 20; - } - - weight + MaxMembers::::put(max); + Self::deposit_event(Event::::NewMaxMembers(max)); + Ok(()) } } } -decl_error! { - /// Errors for this module. - pub enum Error for Module, I: Instance> { - /// An incorrect position was provided. - BadPosition, - /// User is not a member. - NotMember, - /// User is already a member. - AlreadyMember, - /// User is suspended. - Suspended, - /// User is not suspended. - NotSuspended, - /// Nothing to payout. - NoPayout, - /// Society already founded. - AlreadyFounded, - /// Not enough in pot to accept candidate. - InsufficientPot, - /// Member is already vouching or banned from vouching again. - AlreadyVouching, - /// Member is not vouching. - NotVouching, - /// Cannot remove the head of the chain. - Head, - /// Cannot remove the founder. - Founder, - /// User has already made a bid. - AlreadyBid, - /// User is already a candidate. - AlreadyCandidate, - /// User is not a candidate. - NotCandidate, - /// Too many members in the society. 
- MaxMembers, - /// The caller is not the founder. - NotFounder, - /// The caller is not the head. - NotHead, - } -} - -decl_event! { - /// Events for this module. - pub enum Event where - AccountId = ::AccountId, - Balance = BalanceOf - { - /// The society is founded by the given identity. \[founder\] - Founded(AccountId), - /// A membership bid just happened. The given account is the candidate's ID and their offer - /// is the second. \[candidate_id, offer\] - Bid(AccountId, Balance), - /// A membership bid just happened by vouching. The given account is the candidate's ID and - /// their offer is the second. The vouching party is the third. \[candidate_id, offer, vouching\] - Vouch(AccountId, Balance, AccountId), - /// A \[candidate\] was dropped (due to an excess of bids in the system). - AutoUnbid(AccountId), - /// A \[candidate\] was dropped (by their request). - Unbid(AccountId), - /// A \[candidate\] was dropped (by request of who vouched for them). - Unvouch(AccountId), - /// A group of candidates have been inducted. The batch's primary is the first value, the - /// batch in full is the second. \[primary, candidates\] - Inducted(AccountId, Vec), - /// A suspended member has been judged. \[who, judged\] - SuspendedMemberJudgement(AccountId, bool), - /// A \[candidate\] has been suspended - CandidateSuspended(AccountId), - /// A \[member\] has been suspended - MemberSuspended(AccountId), - /// A \[member\] has been challenged - Challenged(AccountId), - /// A vote has been placed \[candidate, voter, vote\] - Vote(AccountId, AccountId, bool), - /// A vote has been placed for a defending member \[voter, vote\] - DefenderVote(AccountId, bool), - /// A new \[max\] member count has been set - NewMaxMembers(u32), - /// Society is unfounded. \[founder\] - Unfounded(AccountId), - /// Some funds were deposited into the society account. \[value\] - Deposit(Balance), - } -} - /// Simple ensure origin struct to filter for the founder account. 
pub struct EnsureFounder(sp_std::marker::PhantomData); impl EnsureOrigin for EnsureFounder { type Success = T::AccountId; fn try_origin(o: T::Origin) -> Result { o.into().and_then(|o| match (o, Founder::::get()) { - (system::RawOrigin::Signed(ref who), Some(ref f)) if who == f => Ok(who.clone()), + (frame_system::RawOrigin::Signed(ref who), Some(ref f)) if who == f => Ok(who.clone()), (r, _) => Err(T::Origin::from(r)), }) } @@ -1185,7 +1273,7 @@ impl EnsureOrigin for EnsureFounder { #[cfg(feature = "runtime-benchmarks")] fn successful_origin() -> T::Origin { let founder = Founder::::get().expect("society founder should exist"); - T::Origin::from(system::RawOrigin::Signed(founder)) + T::Origin::from(frame_system::RawOrigin::Signed(founder)) } } @@ -1203,7 +1291,7 @@ fn pick_usize<'a, R: RngCore>(rng: &mut R, max: usize) -> usize { (rng.next_u32() % (max as u32 + 1)) as usize } -impl, I: Instance> Module { +impl, I: 'static> Pallet { /// Puts a bid into storage ordered by smallest to largest value. /// Allows a maximum of 1000 bids in queue, removing largest value people first. fn put_bid( @@ -1251,7 +1339,7 @@ impl, I: Instance> Module { >::remove(&voucher); }, } - Self::deposit_event(RawEvent::AutoUnbid(popped)); + Self::deposit_event(Event::::AutoUnbid(popped)); } >::put(bids); @@ -1281,7 +1369,7 @@ impl, I: Instance> Module { /// Can fail when `MaxMember` limit is reached, but has no side-effects. fn add_member(who: &T::AccountId) -> DispatchResult { let mut members = >::get(); - ensure!(members.len() < MaxMembers::::get() as usize, Error::::MaxMembers); + ensure!(members.len() < MaxMembers::::get() as usize, Error::::MaxMembers); match members.binary_search(who) { // Add the new member Err(i) => { @@ -1338,8 +1426,8 @@ impl, I: Instance> Module { // out of society. 
members.reserve(candidates.len()); - let maturity = - >::block_number() + Self::lock_duration(members.len() as u32); + let maturity = >::block_number() + + Self::lock_duration(members.len() as u32); let mut rewardees = Vec::new(); let mut total_approvals = 0; @@ -1416,7 +1504,7 @@ impl, I: Instance> Module { } else { // Suspend Candidate >::insert(&candidate, (value, kind)); - Self::deposit_event(RawEvent::CandidateSuspended(candidate)); + Self::deposit_event(Event::::CandidateSuspended(candidate)); None } }) @@ -1485,7 +1573,7 @@ impl, I: Instance> Module { >::put(&primary); T::MembershipChanged::change_members_sorted(&accounts, &[], &members); - Self::deposit_event(RawEvent::Inducted(primary, accounts)); + Self::deposit_event(Event::::Inducted(primary, accounts)); } // Bump the pot by at most PeriodSpend, but less if there's not very much left in our @@ -1550,7 +1638,7 @@ impl, I: Instance> Module { if Self::remove_member(&who).is_ok() { >::insert(who, true); >::remove(who); - Self::deposit_event(RawEvent::MemberSuspended(who.clone())); + Self::deposit_event(Event::::MemberSuspended(who.clone())); } } @@ -1628,7 +1716,7 @@ impl, I: Instance> Module { let chosen = pick_item(&mut rng, &members[1..members.len() - 1]) .expect("exited if members empty; qed"); >::put(&chosen); - Self::deposit_event(RawEvent::Challenged(chosen.clone())); + Self::deposit_event(Event::::Challenged(chosen.clone())); } else { >::kill(); } @@ -1668,7 +1756,7 @@ impl, I: Instance> Module { members_len: usize, pot: BalanceOf, ) -> Vec>> { - let max_members = MaxMembers::::get() as usize; + let max_members = MaxMembers::::get() as usize; let mut max_selections: usize = (T::MaxCandidateIntake::get() as usize).min(max_members.saturating_sub(members_len)); @@ -1725,13 +1813,13 @@ impl, I: Instance> Module { } } -impl OnUnbalanced> for Module { - fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { +impl, I: 'static> OnUnbalanced> for Pallet { + fn on_nonzero_unbalanced(amount: 
NegativeImbalanceOf) { let numeric_amount = amount.peek(); // Must resolve into existing but better to be safe. let _ = T::Currency::resolve_creating(&Self::account_id(), amount); - Self::deposit_event(RawEvent::Deposit(numeric_amount)); + Self::deposit_event(Event::::Deposit(numeric_amount)); } } diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 38c2586323135..9356c083f2331 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -20,10 +20,7 @@ use super::*; use crate as pallet_society; -use frame_support::{ - ord_parameter_types, parameter_types, - traits::{OnFinalize, OnInitialize}, -}; +use frame_support::{ord_parameter_types, parameter_types}; use frame_support_test::TestRandomness; use frame_system::EnsureSignedBy; use sp_core::H256; diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index aba19ba56357a..70637bcd7726f 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -39,9 +39,11 @@ rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elections" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } +pallet-bags-list = { version = "4.0.0-dev", features = ["runtime-benchmarks"], path = "../bags-list" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index bdc3d81f3c29b..220e8f1e6a24c 
100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -21,9 +21,11 @@ use super::*; use crate::Pallet as Staking; use testing_utils::*; +use frame_election_provider_support::SortedListProvider; use frame_support::{ + dispatch::UnfilteredDispatchable, pallet_prelude::*, - traits::{Currency, Get, Imbalance}, + traits::{Currency, CurrencyToVote, Get, Imbalance}, }; use sp_runtime::{ traits::{StaticLookup, Zero}, @@ -110,6 +112,8 @@ pub fn create_validator_with_nominators( assert_eq!(new_validators.len(), 1); assert_eq!(new_validators[0], v_stash, "Our validator was not selected!"); + assert_ne!(CounterForValidators::::get(), 0); + assert_ne!(CounterForNominators::::get(), 0); // Give Era Points let reward = EraRewardPoints:: { @@ -129,13 +133,91 @@ pub fn create_validator_with_nominators( Ok((v_stash, nominators)) } +struct ListScenario { + /// Stash that is expected to be moved. + origin_stash1: T::AccountId, + /// Controller of the Stash that is expected to be moved. + origin_controller1: T::AccountId, + dest_weight: BalanceOf, +} + +impl ListScenario { + /// An expensive scenario for bags-list implementation: + /// + /// - the node to be updated (r) is the head of a bag that has at least one other node. The bag + /// itself will need to be read and written to update its head. The node pointed to by r.next + /// will need to be read and written as it will need to have its prev pointer updated. Note + /// that there are two other worst case scenarios for bag removal: 1) the node is a tail and + /// 2) the node is a middle node with prev and next; all scenarios end up with the same number + /// of storage reads and writes. + /// + /// - the destination bag has at least one node, which will need its next pointer updated. 
+ /// + /// NOTE: while this scenario specifically targets a worst case for the bags-list, it should + /// also elicit a worst case for other known `SortedListProvider` implementations; although + /// this may not be true against unknown `SortedListProvider` implementations. + fn new(origin_weight: BalanceOf, is_increase: bool) -> Result { + ensure!(!origin_weight.is_zero(), "origin weight must be greater than 0"); + + // burn the entire issuance. + let i = T::Currency::burn(T::Currency::total_issuance()); + sp_std::mem::forget(i); + + // create accounts with the origin weight + + let (origin_stash1, origin_controller1) = create_stash_controller_with_balance::( + USER_SEED + 2, + origin_weight, + Default::default(), + )?; + Staking::::nominate( + RawOrigin::Signed(origin_controller1.clone()).into(), + // NOTE: these don't really need to be validators. + vec![T::Lookup::unlookup(account("random_validator", 0, SEED))], + )?; + + let (_origin_stash2, origin_controller2) = create_stash_controller_with_balance::( + USER_SEED + 3, + origin_weight, + Default::default(), + )?; + Staking::::nominate( + RawOrigin::Signed(origin_controller2.clone()).into(), + vec![T::Lookup::unlookup(account("random_validator", 0, SEED))].clone(), + )?; + + // find a destination weight that will trigger the worst case scenario + let dest_weight_as_vote = + T::SortedListProvider::weight_update_worst_case(&origin_stash1, is_increase); + + let total_issuance = T::Currency::total_issuance(); + + let dest_weight = + T::CurrencyToVote::to_currency(dest_weight_as_vote as u128, total_issuance); + + // create an account with the worst case destination weight + let (_dest_stash1, dest_controller1) = create_stash_controller_with_balance::( + USER_SEED + 1, + dest_weight, + Default::default(), + )?; + Staking::::nominate( + RawOrigin::Signed(dest_controller1).into(), + vec![T::Lookup::unlookup(account("random_validator", 0, SEED))], + )?; + + Ok(ListScenario { origin_stash1, origin_controller1, 
dest_weight }) + } +} + const USER_SEED: u32 = 999666; benchmarks! { bond { let stash = create_funded_user::("stash", USER_SEED, 100); let controller = create_funded_user::("controller", USER_SEED, 100); - let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source + = T::Lookup::unlookup(controller.clone()); let reward_destination = RewardDestination::Staked; let amount = T::Currency::minimum_balance() * 10u32.into(); whitelist_account!(stash); @@ -146,10 +228,25 @@ benchmarks! { } bond_extra { - let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; - let max_additional = T::Currency::minimum_balance() * 10u32.into(); - let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; - let original_bonded: BalanceOf = ledger.active; + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup the worst case list scenario. + + // the weight the nominator will start at. + let scenario = ListScenario::::new(origin_weight, true)?; + + let max_additional = scenario.dest_weight.clone() - origin_weight; + + let stash = scenario.origin_stash1.clone(); + let controller = scenario.origin_controller1.clone(); + let original_bonded: BalanceOf + = Ledger::::get(&controller).map(|l| l.active).ok_or("ledger not created after")?; + + T::Currency::deposit_into_existing(&stash, max_additional).unwrap(); + whitelist_account!(stash); }: _(RawOrigin::Signed(stash), max_additional) verify { @@ -159,10 +256,25 @@ benchmarks! { } unbond { - let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; - let amount = T::Currency::minimum_balance() * 10u32.into(); + use sp_std::convert::TryFrom; + // clean up any existing state. + clear_validators_and_nominators::(); + + // setup the worst case list scenario. 
+ let total_issuance = T::Currency::total_issuance(); + // the weight the nominator will start at. The value used here is expected to be + // significantly higher than the first position in a list (e.g. the first bag threshold). + let origin_weight = BalanceOf::::try_from(952_994_955_240_703u128) + .map_err(|_| "balance expected to be a u128") + .unwrap(); + let scenario = ListScenario::::new(origin_weight, false)?; + + let stash = scenario.origin_stash1.clone(); + let controller = scenario.origin_controller1.clone(); + let amount = origin_weight - scenario.dest_weight.clone(); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_bonded: BalanceOf = ledger.active; + whitelist_account!(controller); }: _(RawOrigin::Signed(controller.clone()), amount) verify { @@ -194,26 +306,50 @@ benchmarks! { withdraw_unbonded_kill { // Slashing Spans let s in 0 .. MAX_SPANS; - let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; - add_slashing_spans::(&stash, s); - let amount = T::Currency::minimum_balance() * 10u32.into(); - Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1.clone(); + assert!(T::SortedListProvider::contains(&stash)); + + let ed = T::Currency::minimum_balance(); + let mut ledger = Ledger::::get(&controller).unwrap(); + ledger.active = ed - One::one(); + Ledger::::insert(&controller, ledger); CurrentEra::::put(EraIndex::max_value()); - let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; - let original_total: BalanceOf = ledger.total; + whitelist_account!(controller); }: withdraw_unbonded(RawOrigin::Signed(controller.clone()), s) verify { assert!(!Ledger::::contains_key(controller)); + assert!(!T::SortedListProvider::contains(&stash)); } validate { - let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup a worst case scenario where the user calling validate was formerly a nominator so + // they must be removed from the list. + let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1.clone(); + assert!(T::SortedListProvider::contains(&stash)); + let prefs = ValidatorPrefs::default(); whitelist_account!(controller); }: _(RawOrigin::Signed(controller), prefs) verify { - assert!(Validators::::contains_key(stash)); + assert!(Validators::::contains_key(&stash)); + assert!(!T::SortedListProvider::contains(&stash)); } kick { @@ -225,7 +361,7 @@ benchmarks! { // these are the other validators; there are `T::MAX_NOMINATIONS - 1` of them, so // there are a total of `T::MAX_NOMINATIONS` validators in the system. 
- let rest_of_validators = create_validators::(T::MAX_NOMINATIONS - 1, 100)?; + let rest_of_validators = create_validators_with_seed::(T::MAX_NOMINATIONS - 1, 100, 415)?; // this is the validator that will be kicking. let (stash, controller) = create_stash_controller::( @@ -282,18 +418,50 @@ benchmarks! { // Worst case scenario, T::MAX_NOMINATIONS nominate { let n in 1 .. T::MAX_NOMINATIONS; - let (stash, controller) = create_stash_controller::(n + 1, 100, Default::default())?; - let validators = create_validators::(n, 100)?; + + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup a worst case list scenario. Note we don't care about the destination position, because + // we are just doing an insert into the origin position. + let scenario = ListScenario::::new(origin_weight, true)?; + let (stash, controller) = create_stash_controller_with_balance::( + SEED + T::MAX_NOMINATIONS + 1, // make sure the account does not conflict with others + origin_weight, + Default::default(), + ).unwrap(); + + assert!(!Nominators::::contains_key(&stash)); + assert!(!T::SortedListProvider::contains(&stash)); + + let validators = create_validators::(n, 100).unwrap(); whitelist_account!(controller); }: _(RawOrigin::Signed(controller), validators) verify { - assert!(Nominators::::contains_key(stash)); + assert!(Nominators::::contains_key(&stash)); + assert!(T::SortedListProvider::contains(&stash)) } chill { - let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1.clone(); + assert!(T::SortedListProvider::contains(&stash)); + whitelist_account!(controller); }: _(RawOrigin::Signed(controller)) + verify { + assert!(!T::SortedListProvider::contains(&stash)); + } set_payee { let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; @@ -345,11 +513,23 @@ benchmarks! { force_unstake { // Slashing Spans let s in 0 .. MAX_SPANS; - let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; + // Clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. + let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1.clone(); + assert!(T::SortedListProvider::contains(&stash)); add_slashing_spans::(&stash, s); - }: _(RawOrigin::Root, stash, s) + + }: _(RawOrigin::Root, stash.clone(), s) verify { assert!(!Ledger::::contains_key(&controller)); + assert!(!T::SortedListProvider::contains(&stash)); } cancel_deferred_slash { @@ -438,19 +618,46 @@ benchmarks! { rebond { let l in 1 .. MAX_UNLOCKING_CHUNKS as u32; - let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; - let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get() + .max(T::Currency::minimum_balance()) + // we use 100 to play friendly with the list threshold values in the mock + .max(100u32.into()); + + // setup a worst case list scenario. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + let dest_weight = scenario.dest_weight.clone(); + + // rebond an amount that will give the user dest_weight + let rebond_amount = dest_weight - origin_weight; + + // spread that amount to rebond across `l` unlocking chunks, + let value = rebond_amount / l.into(); + // if `value` is zero, we need a greater delta between dest <=> origin weight + assert_ne!(value, Zero::zero()); + // so the sum of unlocking chunks puts voter into the dest bag. + assert!(value * l.into() + origin_weight > origin_weight); + assert!(value * l.into() + origin_weight <= dest_weight); let unlock_chunk = UnlockChunk::> { - value: 1u32.into(), + value, era: EraIndex::zero(), }; + + let stash = scenario.origin_stash1.clone(); + let controller = scenario.origin_controller1.clone(); + let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + for _ in 0 .. l { staking_ledger.unlocking.push(unlock_chunk.clone()) } Ledger::::insert(controller.clone(), staking_ledger.clone()); let original_bonded: BalanceOf = staking_ledger.active; + whitelist_account!(controller); - }: _(RawOrigin::Signed(controller.clone()), (l + 100).into()) + }: _(RawOrigin::Signed(controller.clone()), rebond_amount) verify { let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; let new_bonded: BalanceOf = ledger.active; @@ -477,19 +684,28 @@ benchmarks! { reap_stash { let s in 1 .. MAX_SPANS; - let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; - Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1.clone(); + add_slashing_spans::(&stash, s); T::Currency::make_free_balance_be(&stash, T::Currency::minimum_balance()); - whitelist_account!(controller); assert!(Bonded::::contains_key(&stash)); - assert!(Validators::::contains_key(&stash)); + assert!(T::SortedListProvider::contains(&stash)); + whitelist_account!(controller); }: _(RawOrigin::Signed(controller), stash.clone(), s) verify { assert!(!Bonded::::contains_key(&stash)); - assert!(!Validators::::contains_key(&stash)); + assert!(!T::SortedListProvider::contains(&stash)); } new_era { @@ -549,9 +765,15 @@ benchmarks! { >::insert(current_era, total_payout); let caller: T::AccountId = whitelisted_caller(); + let origin = RawOrigin::Signed(caller); + let calls: Vec<_> = payout_calls_arg.iter().map(|arg| + Call::::payout_stakers { validator_stash: arg.0.clone(), era: arg.1 }.encode() + ).collect(); }: { - for arg in payout_calls_arg { - >::payout_stakers(RawOrigin::Signed(caller.clone()).into(), arg.0, arg.1)?; + for call in calls { + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(origin.clone().into())?; } } @@ -590,17 +812,21 @@ benchmarks! { // total number of slashing spans. Assigned to validators randomly. let s in 1 .. 20; - let validators = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)? - .into_iter() - .map(|v| T::Lookup::lookup(v).unwrap()) - .collect::>(); + let validators = create_validators_with_nominators_for_era::( + v, n, T::MAX_NOMINATIONS as usize, false, None + )? 
+ .into_iter() + .map(|v| T::Lookup::lookup(v).unwrap()) + .collect::>(); (0..s).for_each(|index| { add_slashing_spans::(&validators[index as usize], 10); }); + + let num_voters = (v + n) as usize; }: { - let voters = >::get_npos_voters(); - assert_eq!(voters.len() as u32, v + n); + let voters = >::get_npos_voters(None); + assert_eq!(voters.len(), num_voters); } get_npos_targets { @@ -609,7 +835,9 @@ benchmarks! { // number of nominator intention. let n = MAX_NOMINATORS; - let _ = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)?; + let _ = create_validators_with_nominators_for_era::( + v, n, T::MAX_NOMINATIONS as usize, false, None + )?; }: { let targets = >::get_npos_targets(); assert_eq!(targets.len() as u32, v); @@ -633,8 +861,18 @@ benchmarks! { } chill_other { - let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; - Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. + let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1.clone(); + assert!(T::SortedListProvider::contains(&stash)); + Staking::::set_staking_limits( RawOrigin::Root.into(), BalanceOf::::max_value(), @@ -643,11 +881,19 @@ benchmarks! 
{ Some(0), Some(Percent::from_percent(0)) )?; + let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), controller.clone()) verify { - assert!(!Validators::::contains_key(controller)); + assert!(!T::SortedListProvider::contains(&stash)); } + + impl_benchmark_test_suite!( + Staking, + crate::mock::ExtBuilder::default().has_stakers(true), + crate::mock::Test, + exec_name = build_and_execute + ); } #[cfg(test)] @@ -658,7 +904,7 @@ mod tests { #[test] fn create_validators_with_nominators_for_era_works() { - ExtBuilder::default().has_stakers(true).build_and_execute(|| { + ExtBuilder::default().build_and_execute(|| { let v = 10; let n = 100; @@ -674,6 +920,9 @@ mod tests { let count_validators = Validators::::iter().count(); let count_nominators = Nominators::::iter().count(); + assert_eq!(count_validators, CounterForValidators::::get() as usize); + assert_eq!(count_nominators, CounterForNominators::::get() as usize); + assert_eq!(count_validators, v as usize); assert_eq!(count_nominators, n as usize); }); @@ -681,7 +930,7 @@ mod tests { #[test] fn create_validator_with_nominators_works() { - ExtBuilder::default().has_stakers(true).build_and_execute(|| { + ExtBuilder::default().build_and_execute(|| { let n = 10; let (validator_stash, nominators) = create_validator_with_nominators::( @@ -706,7 +955,7 @@ mod tests { #[test] fn add_slashing_spans_works() { - ExtBuilder::default().has_stakers(true).build_and_execute(|| { + ExtBuilder::default().build_and_execute(|| { let n = 10; let (validator_stash, _nominators) = create_validator_with_nominators::( @@ -738,7 +987,7 @@ mod tests { #[test] fn test_payout_all() { - ExtBuilder::default().has_stakers(true).build_and_execute(|| { + ExtBuilder::default().build_and_execute(|| { let v = 10; let n = 100; @@ -759,10 +1008,3 @@ mod tests { }); } } - -impl_benchmark_test_suite!( - Staking, - crate::mock::ExtBuilder::default().has_stakers(true), - crate::mock::Test, - exec_name = build_and_execute -); diff --git 
a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 31b35acdd99aa..582e9e49bd356 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -100,6 +100,13 @@ //! //! An account can become a nominator via the [`nominate`](Call::nominate) call. //! +//! #### Voting +//! +//! Staking is closely related to elections; actual validators are chosen from among all potential +//! validators via election by the potential validators and nominators. To reduce use of the phrase +//! "potential validators and nominators", we often use the term **voters**, who are simply +//! the union of potential validators and nominators. +//! //! #### Rewards and Slash //! //! The **reward and slashing** procedure is the core of the Staking pallet, attempting to _embrace @@ -264,15 +271,16 @@ //! - [Session](../pallet_session/index.html): Used to manage sessions. Also, a list of new //! validators is stored in the Session pallet's `Validators` at the end of each era. -#![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -#[cfg(test)] -mod mock; #[cfg(any(feature = "runtime-benchmarks", test))] pub mod testing_utils; + +#[cfg(test)] +pub(crate) mod mock; #[cfg(test)] mod tests; @@ -420,6 +428,7 @@ pub struct UnlockChunk { } /// The ledger of a (bonded) stash. +#[cfg_attr(feature = "runtime-benchmarks", derive(Default))] #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct StakingLedger { /// The stash account whose balance is actually locked and at stake. @@ -470,7 +479,9 @@ impl } /// Re-bond funds that were scheduled for unlocking. - fn rebond(mut self, value: Balance) -> Self { + /// + /// Returns the updated ledger, and the amount actually rebonded. 
+ fn rebond(mut self, value: Balance) -> (Self, Balance) { let mut unlocking_balance: Balance = Zero::zero(); while let Some(last) = self.unlocking.last_mut() { @@ -491,7 +502,7 @@ impl } } - self + (self, unlocking_balance) } } @@ -727,11 +738,12 @@ enum Releases { V5_0_0, // blockable validators. V6_0_0, // removal of all storage associated with offchain phragmen. V7_0_0, // keep track of number of nominators / validators in map + V8_0_0, // populate `SortedListProvider`. } impl Default for Releases { fn default() -> Self { - Releases::V7_0_0 + Releases::V8_0_0 } } diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index d7fa2afc63082..7064f06dd12c7 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -18,6 +18,56 @@ use super::*; +pub mod v8 { + use frame_election_provider_support::SortedListProvider; + use frame_support::traits::Get; + + use crate::{Config, Nominators, Pallet, StorageVersion, Weight}; + + #[cfg(feature = "try-runtime")] + pub fn pre_migrate() -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == crate::Releases::V7_0_0, + "must upgrade linearly" + ); + + crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",); + Ok(()) + } + + /// Migration to sorted [`SortedListProvider`]. 
+ pub fn migrate() -> Weight { + if StorageVersion::::get() == crate::Releases::V7_0_0 { + crate::log!(info, "migrating staking to Releases::V8_0_0"); + + let migrated = T::SortedListProvider::regenerate( + Nominators::::iter().map(|(id, _)| id), + Pallet::::weight_of_fn(), + ); + debug_assert_eq!(T::SortedListProvider::sanity_check(), Ok(())); + + StorageVersion::::put(crate::Releases::V8_0_0); + crate::log!( + info, + "👜 completed staking migration to Releases::V8_0_0 with {} voters migrated", + migrated, + ); + + T::BlockWeights::get().max_block + } else { + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + pub fn post_migrate() -> Result<(), &'static str> { + T::SortedListProvider::sanity_check() + .map_err(|_| "SortedListProvider is not in a sane state.")?; + crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",); + Ok(()) + } +} + pub mod v7 { use super::*; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 0357fa05cb1dd..06c9be9c01e11 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -17,14 +17,12 @@ //! 
Test utilities -use crate as staking; -use crate::*; -use frame_election_provider_support::onchain; +use crate::{self as pallet_staking, *}; +use frame_election_provider_support::{onchain, SortedListProvider}; use frame_support::{ assert_ok, parameter_types, traits::{ - Currency, FindAuthor, GenesisBuild, Get, Hooks, Imbalance, OnInitialize, OnUnbalanced, - OneSessionHandler, + Currency, FindAuthor, GenesisBuild, Get, Hooks, Imbalance, OnUnbalanced, OneSessionHandler, }, weights::constants::RocksDbWeight, }; @@ -104,8 +102,9 @@ frame_support::construct_runtime!( Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Staking: staking::{Pallet, Call, Config, Storage, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, } ); @@ -242,12 +241,26 @@ impl OnUnbalanced> for RewardRemainderMock { } } +const THRESHOLDS: [sp_npos_elections::VoteWeight; 9] = + [10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; + +parameter_types! { + pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; +} + +impl pallet_bags_list::Config for Test { + type Event = Event; + type WeightInfo = (); + type VoteWeightProvider = Staking; + type BagThresholds = BagThresholds; +} + impl onchain::Config for Test { type Accuracy = Perbill; type DataProvider = Staking; } -impl Config for Test { +impl crate::pallet::pallet::Config for Test { const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; type UnixTime = Timestamp; @@ -267,6 +280,8 @@ impl Config for Test { type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); + // NOTE: consider a macro and use `UseNominatorsMap` as well. 
+ type SortedListProvider = BagsList; } impl frame_system::offchain::SendTransactionTypes for Test @@ -469,7 +484,7 @@ impl ExtBuilder { stakers.extend(self.stakers) } - let _ = staking::GenesisConfig:: { + let _ = pallet_staking::GenesisConfig:: { stakers, validator_count: self.validator_count, minimum_validator_count: self.minimum_validator_count, @@ -533,6 +548,10 @@ fn check_count() { let validator_count = Validators::::iter().count() as u32; assert_eq!(nominator_count, CounterForNominators::::get()); assert_eq!(validator_count, CounterForValidators::::get()); + + // the voters that the `SortedListProvider` list is storing for us. + let external_voters = ::SortedListProvider::count(); + assert_eq!(external_voters, nominator_count); } fn check_ledgers() { @@ -625,10 +644,14 @@ pub(crate) fn current_era() -> EraIndex { Staking::current_era().unwrap() } -pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { +pub(crate) fn bond(stash: AccountId, ctrl: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&stash, val); let _ = Balances::make_free_balance_be(&ctrl, val); assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller)); +} + +pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { + bond(stash, ctrl, val); assert_ok!(Staking::validate(Origin::signed(ctrl), ValidatorPrefs::default())); } @@ -638,9 +661,7 @@ pub(crate) fn bond_nominator( val: Balance, target: Vec, ) { - let _ = Balances::make_free_balance_be(&stash, val); - let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller)); + bond(stash, ctrl, val); assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); } @@ -833,7 +854,7 @@ macro_rules! 
assert_session_era { }; } -pub(crate) fn staking_events() -> Vec> { +pub(crate) fn staking_events() -> Vec> { System::events() .into_iter() .map(|r| r.event) diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index fecd493eea022..3ae520872f278 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -17,7 +17,10 @@ //! Implementations for the Staking FRAME Pallet. -use frame_election_provider_support::{data_provider, ElectionProvider, Supports, VoteWeight}; +use frame_election_provider_support::{ + data_provider, ElectionDataProvider, ElectionProvider, SortedListProvider, Supports, + VoteWeight, VoteWeightProvider, +}; use frame_support::{ pallet_prelude::*, traits::{ @@ -26,6 +29,7 @@ use frame_support::{ }, weights::{Weight, WithPostDispatchInfo}, }; +use frame_system::pallet_prelude::BlockNumberFor; use pallet_session::historical; use sp_runtime::{ traits::{Bounded, Convert, SaturatedConversion, Saturating, Zero}, @@ -64,7 +68,7 @@ impl Pallet { /// /// This prevents call sites from repeatedly requesting `total_issuance` from backend. But it is /// important to be only used while the total issuance is not changing. - pub fn slashable_balance_of_fn() -> Box VoteWeight> { + pub fn weight_of_fn() -> Box VoteWeight> { // NOTE: changing this to unboxed `impl Fn(..)` return type and the pallet will still // compile, while some types in mock fail to resolve. let issuance = T::Currency::total_issuance(); @@ -73,6 +77,12 @@ impl Pallet { }) } + /// Same as `weight_of_fn`, but made for one time use. + pub fn weight_of(who: &T::AccountId) -> VoteWeight { + let issuance = T::Currency::total_issuance(); + Self::slashable_balance_of_vote_weight(who, issuance) + } + pub(super) fn do_payout_stakers( validator_stash: T::AccountId, era: EraIndex, @@ -629,54 +639,92 @@ impl Pallet { /// Get all of the voters that are eligible for the npos election. 
/// - /// This will use all on-chain nominators, and all the validators will inject a self vote. + /// `maybe_max_len` can imposes a cap on the number of voters returned; First all the validator + /// are included in no particular order, then remainder is taken from the nominators, as + /// returned by [`Config::SortedListProvider`]. + /// + /// This will use nominators, and all the validators will inject a self vote. /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. /// /// ### Slashing /// /// All nominations that have been submitted before the last non-zero slash of the validator are - /// auto-chilled. - pub fn get_npos_voters() -> Vec<(T::AccountId, VoteWeight, Vec)> { - let weight_of = Self::slashable_balance_of_fn(); - let mut all_voters = Vec::new(); + /// auto-chilled, but still count towards the limit imposed by `maybe_max_len`. + pub fn get_npos_voters( + maybe_max_len: Option, + ) -> Vec<(T::AccountId, VoteWeight, Vec)> { + let max_allowed_len = { + let nominator_count = CounterForNominators::::get() as usize; + let validator_count = CounterForValidators::::get() as usize; + let all_voter_count = validator_count.saturating_add(nominator_count); + maybe_max_len.unwrap_or(all_voter_count).min(all_voter_count) + }; - let mut validator_count = 0u32; - for (validator, _) in >::iter() { + let mut all_voters = Vec::<_>::with_capacity(max_allowed_len); + + // first, grab all validators in no particular order, capped by the maximum allowed length. + let mut validators_taken = 0u32; + for (validator, _) in >::iter().take(max_allowed_len) { // Append self vote. - let self_vote = (validator.clone(), weight_of(&validator), vec![validator.clone()]); + let self_vote = + (validator.clone(), Self::weight_of(&validator), vec![validator.clone()]); all_voters.push(self_vote); - validator_count.saturating_inc(); + validators_taken.saturating_inc(); } - // Collect all slashing spans into a BTreeMap for further queries. + // .. 
and grab whatever we have left from nominators. + let nominators_quota = (max_allowed_len as u32).saturating_sub(validators_taken); let slashing_spans = >::iter().collect::>(); - let mut nominator_count = 0u32; - for (nominator, nominations) in Nominators::::iter() { - let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; - - // Filter out nomination targets which were nominated before the most recent - // slashing span. - targets.retain(|stash| { - slashing_spans - .get(stash) - .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) - }); + // track the count of nominators added to `all_voters + let mut nominators_taken = 0u32; + // track every nominator iterated over, but not necessarily added to `all_voters` + let mut nominators_seen = 0u32; + + let mut nominators_iter = T::SortedListProvider::iter(); + while nominators_taken < nominators_quota && nominators_seen < nominators_quota * 2 { + let nominator = match nominators_iter.next() { + Some(nominator) => { + nominators_seen.saturating_inc(); + nominator + }, + None => break, + }; - if !targets.is_empty() { - let vote_weight = weight_of(&nominator); - all_voters.push((nominator, vote_weight, targets)); - nominator_count.saturating_inc(); + if let Some(Nominations { submitted_in, mut targets, suppressed: _ }) = + >::get(&nominator) + { + targets.retain(|stash| { + slashing_spans + .get(stash) + .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) + }); + if !targets.len().is_zero() { + all_voters.push((nominator.clone(), Self::weight_of(&nominator), targets)); + nominators_taken.saturating_inc(); + } + } else { + log!(error, "invalid item in `SortedListProvider`: {:?}", nominator) } } + // all_voters should have not re-allocated. 
+ debug_assert!(all_voters.capacity() == max_allowed_len); + Self::register_weight(T::WeightInfo::get_npos_voters( - validator_count, - nominator_count, + validators_taken, + nominators_taken, slashing_spans.len() as u32, )); + log!( + info, + "generated {} npos voters, {} from validators and {} nominators", + all_voters.len(), + validators_taken, + nominators_taken + ); all_voters } @@ -698,34 +746,59 @@ impl Pallet { } /// This function will add a nominator to the `Nominators` storage map, - /// and keep track of the `CounterForNominators`. + /// [`SortedListProvider`] and keep track of the `CounterForNominators`. /// /// If the nominator already exists, their nominations will be updated. + /// + /// NOTE: you must ALWAYS use this function to add nominator or update their targets. Any access + /// to `Nominators`, its counter, or `VoterList` outside of this function is almost certainly + /// wrong. pub fn do_add_nominator(who: &T::AccountId, nominations: Nominations) { if !Nominators::::contains_key(who) { - CounterForNominators::::mutate(|x| x.saturating_inc()) + // maybe update the counter. + CounterForNominators::::mutate(|x| x.saturating_inc()); + + // maybe update sorted list. Error checking is defensive-only - this should never fail. + if T::SortedListProvider::on_insert(who.clone(), Self::weight_of(who)).is_err() { + log!(warn, "attempt to insert duplicate nominator ({:#?})", who); + debug_assert!(false, "attempt to insert duplicate nominator"); + }; + + debug_assert_eq!(T::SortedListProvider::sanity_check(), Ok(())); } + Nominators::::insert(who, nominations); } /// This function will remove a nominator from the `Nominators` storage map, - /// and keep track of the `CounterForNominators`. + /// [`SortedListProvider`] and keep track of the `CounterForNominators`. /// /// Returns true if `who` was removed from `Nominators`, otherwise false. + /// + /// NOTE: you must ALWAYS use this function to remove a nominator from the system. 
Any access to + /// `Nominators`, its counter, or `VoterList` outside of this function is almost certainly + /// wrong. pub fn do_remove_nominator(who: &T::AccountId) -> bool { if Nominators::::contains_key(who) { Nominators::::remove(who); CounterForNominators::::mutate(|x| x.saturating_dec()); + T::SortedListProvider::on_remove(who); + debug_assert_eq!(T::SortedListProvider::sanity_check(), Ok(())); + debug_assert_eq!(CounterForNominators::::get(), T::SortedListProvider::count()); true } else { false } } - /// This function will add a validator to the `Validators` storage map, - /// and keep track of the `CounterForValidators`. + /// This function will add a validator to the `Validators` storage map, and keep track of the + /// `CounterForValidators`. /// /// If the validator already exists, their preferences will be updated. + /// + /// NOTE: you must ALWAYS use this function to add a validator to the system. Any access to + /// `Validators`, its counter, or `VoterList` outside of this function is almost certainly + /// wrong. pub fn do_add_validator(who: &T::AccountId, prefs: ValidatorPrefs) { if !Validators::::contains_key(who) { CounterForValidators::::mutate(|x| x.saturating_inc()) @@ -737,6 +810,10 @@ impl Pallet { /// and keep track of the `CounterForValidators`. /// /// Returns true if `who` was removed from `Validators`, otherwise false. + /// + /// NOTE: you must ALWAYS use this function to remove a validator from the system. Any access to + /// `Validators`, its counter, or `VoterList` outside of this function is almost certainly + /// wrong. 
pub fn do_remove_validator(who: &T::AccountId) -> bool { if Validators::::contains_key(who) { Validators::::remove(who); @@ -758,10 +835,9 @@ impl Pallet { } } -impl frame_election_provider_support::ElectionDataProvider - for Pallet -{ +impl ElectionDataProvider> for Pallet { const MAXIMUM_VOTES_PER_VOTER: u32 = T::MAX_NOMINATIONS; + fn desired_targets() -> data_provider::Result { Self::register_weight(T::DbWeight::get().reads(1)); Ok(Self::validator_count()) @@ -770,30 +846,26 @@ impl frame_election_provider_support::ElectionDataProvider, ) -> data_provider::Result)>> { - let nominator_count = CounterForNominators::::get(); - let validator_count = CounterForValidators::::get(); - - let voter_count = nominator_count.saturating_add(validator_count) as usize; debug_assert!(>::iter().count() as u32 == CounterForNominators::::get()); debug_assert!(>::iter().count() as u32 == CounterForValidators::::get()); + debug_assert_eq!( + CounterForNominators::::get(), + T::SortedListProvider::count(), + "voter_count must be accurate", + ); - // register the extra 2 reads - Self::register_weight(T::DbWeight::get().reads(2)); - - if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { - return Err("Voter snapshot too big") - } + // This can never fail -- if `maybe_max_len` is `Some(_)` we handle it. + let voters = Self::get_npos_voters(maybe_max_len); + debug_assert!(maybe_max_len.map_or(true, |max| voters.len() <= max)); - Ok(Self::get_npos_voters()) + Ok(voters) } fn targets(maybe_max_len: Option) -> data_provider::Result> { - let target_count = CounterForValidators::::get() as usize; + let target_count = CounterForValidators::::get(); - // register the extra 1 read - Self::register_weight(T::DbWeight::get().reads(1)); - - if maybe_max_len.map_or(false, |max_len| target_count > max_len) { + // We can't handle this case yet -- return an error. 
+ if maybe_max_len.map_or(false, |max_len| target_count > max_len as u32) { return Err("Target snapshot too big") } @@ -879,6 +951,9 @@ impl frame_election_provider_support::ElectionDataProvider>::remove_all(None); >::remove_all(None); >::remove_all(None); + >::kill(); + >::kill(); + let _ = T::SortedListProvider::clear(None); } #[cfg(feature = "runtime-benchmarks")] @@ -1152,3 +1227,77 @@ where consumed_weight } } + +impl VoteWeightProvider for Pallet { + fn vote_weight(who: &T::AccountId) -> VoteWeight { + Self::weight_of(who) + } + + #[cfg(feature = "runtime-benchmarks")] + fn set_vote_weight_of(who: &T::AccountId, weight: VoteWeight) { + // this will clearly results in an inconsistent state, but it should not matter for a + // benchmark. + use sp_std::convert::TryInto; + let active: BalanceOf = weight.try_into().map_err(|_| ()).unwrap(); + let mut ledger = Self::ledger(who).unwrap_or_default(); + ledger.active = active; + >::insert(who, ledger); + >::insert(who, who); + + // also, we play a trick to make sure that a issuance based-`CurrencyToVote` behaves well: + // This will make sure that total issuance is zero, thus the currency to vote will be a 1-1 + // conversion. + let imbalance = T::Currency::burn(T::Currency::total_issuance()); + // kinda ugly, but gets the job done. The fact that this works here is a HUGE exception. + // Don't try this pattern in other places. + sp_std::mem::forget(imbalance); + } +} + +/// A simple voter list implementation that does not require any additional pallets. Note, this +/// does not provided nominators in sorted ordered. If you desire nominators in a sorted order take +/// a look at [`pallet-bags-list]. +pub struct UseNominatorsMap(sp_std::marker::PhantomData); +impl SortedListProvider for UseNominatorsMap { + type Error = (); + + /// Returns iterator over voter list, which can have `take` called on it. 
+ fn iter() -> Box> { + Box::new(Nominators::::iter().map(|(n, _)| n)) + } + fn count() -> u32 { + CounterForNominators::::get() + } + fn contains(id: &T::AccountId) -> bool { + Nominators::::contains_key(id) + } + fn on_insert(_: T::AccountId, _weight: VoteWeight) -> Result<(), Self::Error> { + // nothing to do on insert. + Ok(()) + } + fn on_update(_: &T::AccountId, _weight: VoteWeight) { + // nothing to do on update. + } + fn on_remove(_: &T::AccountId) { + // nothing to do on remove. + } + fn regenerate( + _: impl IntoIterator, + _: Box VoteWeight>, + ) -> u32 { + // nothing to do upon regenerate. + 0 + } + fn sanity_check() -> Result<(), &'static str> { + Ok(()) + } + fn clear(maybe_count: Option) -> u32 { + Nominators::::remove_all(maybe_count); + if let Some(count) = maybe_count { + CounterForNominators::::mutate(|noms| *noms - count); + count + } else { + CounterForNominators::::take() + } + } +} diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index d99cd89f3b06c..dad958ccaea2f 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -17,6 +17,7 @@ //! Staking FRAME Pallet. +use frame_election_provider_support::SortedListProvider; use frame_support::{ pallet_prelude::*, traits::{ @@ -38,7 +39,7 @@ mod impls; pub use impls::*; use crate::{ - migrations, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraIndex, EraPayout, + log, migrations, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraIndex, EraPayout, EraRewardPoints, Exposure, Forcing, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, Releases, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, @@ -140,6 +141,11 @@ pub mod pallet { #[pallet::constant] type MaxNominatorRewardedPerValidator: Get; + /// Something that can provide a sorted list of voters in a somewhat sorted way. The + /// original use case for this was designed with [`pallet_bags_list::Pallet`] in mind. 
If + /// the bags-list is not desired, [`impls::UseNominatorsMap`] is likely the desired option. + type SortedListProvider: SortedListProvider; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -492,6 +498,13 @@ pub mod pallet { MinValidatorBond::::put(self.min_validator_bond); for &(ref stash, ref controller, balance, ref status) in &self.stakers { + log!( + trace, + "inserting genesis staker: {:?} => {:?} => {:?}", + stash, + balance, + status + ); assert!( T::Currency::free_balance(&stash) >= balance, "Stash does not have enough balance to bond." @@ -514,6 +527,13 @@ pub mod pallet { _ => Ok(()), }); } + + // all voters are reported to the `SortedListProvider`. + assert_eq!( + T::SortedListProvider::count(), + CounterForNominators::::get(), + "not all genesis stakers were inserted into sorted list provider, something is wrong." + ); } } @@ -763,8 +783,15 @@ pub mod pallet { Error::::InsufficientBond ); - Self::deposit_event(Event::::Bonded(stash, extra)); + // NOTE: ledger must be updated prior to calling `Self::weight_of`. Self::update_ledger(&controller, &ledger); + // update this staker in the sorted list, if they exist in it. + if T::SortedListProvider::contains(&stash) { + T::SortedListProvider::on_update(&stash, Self::weight_of(&ledger.stash)); + debug_assert_eq!(T::SortedListProvider::sanity_check(), Ok(())); + } + + Self::deposit_event(Event::::Bonded(stash.clone(), extra)); } Ok(()) } @@ -823,7 +850,14 @@ pub mod pallet { // Note: in case there is no current era it is fine to bond one era more. let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); ledger.unlocking.push(UnlockChunk { value, era }); + // NOTE: ledger must be updated prior to calling `Self::weight_of`. Self::update_ledger(&controller, &ledger); + + // update this staker in the sorted list, if they exist in it. 
+ if T::SortedListProvider::contains(&ledger.stash) { + T::SortedListProvider::on_update(&ledger.stash, Self::weight_of(&ledger.stash)); + } + Self::deposit_event(Event::::Unbonded(ledger.stash, value)); } Ok(()) @@ -1314,12 +1348,17 @@ pub mod pallet { ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); let initial_unlocking = ledger.unlocking.len() as u32; - let ledger = ledger.rebond(value); + let (ledger, rebonded_value) = ledger.rebond(value); // Last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); - Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); + Self::deposit_event(Event::::Bonded(ledger.stash.clone(), rebonded_value)); + + // NOTE: ledger must be updated prior to calling `Self::weight_of`. Self::update_ledger(&controller, &ledger); + if T::SortedListProvider::contains(&ledger.stash) { + T::SortedListProvider::on_update(&ledger.stash, Self::weight_of(&ledger.stash)); + } let removed_chunks = 1u32 // for the case where the last iterated chunk is not removed .saturating_add(initial_unlocking) @@ -1492,8 +1531,6 @@ pub mod pallet { /// /// This can be helpful if bond requirements are updated, and we need to remove old users /// who do not satisfy these requirements. - // TODO: Maybe we can deprecate `chill` in the future. - // https://github.com/paritytech/substrate/issues/9111 #[pallet::weight(T::WeightInfo::chill_other())] pub fn chill_other(origin: OriginFor, controller: T::AccountId) -> DispatchResult { // Anyone can call this function. 
diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 795c066d09bb3..13762cf5886db 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -27,6 +27,7 @@ use rand_chacha::{ }; use sp_io::hashing::blake2_256; +use frame_election_provider_support::SortedListProvider; use frame_support::{pallet_prelude::*, traits::Currency}; use sp_runtime::{traits::StaticLookup, Perbill}; use sp_std::prelude::*; @@ -37,8 +38,11 @@ const SEED: u32 = 0; pub fn clear_validators_and_nominators() { Validators::::remove_all(None); CounterForValidators::::kill(); + + // whenever we touch nominators counter we should update `T::SortedListProvider` as well. Nominators::::remove_all(None); CounterForNominators::::kill(); + let _ = T::SortedListProvider::clear(None); } /// Grab a funded user. @@ -49,9 +53,18 @@ pub fn create_funded_user( ) -> T::AccountId { let user = account(string, n, SEED); let balance = T::Currency::minimum_balance() * balance_factor.into(); - T::Currency::make_free_balance_be(&user, balance); - // ensure T::CurrencyToVote will work correctly. - T::Currency::issue(balance); + let _ = T::Currency::make_free_balance_be(&user, balance); + user +} + +/// Grab a funded user with max Balance. +pub fn create_funded_user_with_balance( + string: &'static str, + n: u32, + balance: BalanceOf, +) -> T::AccountId { + let user = account(string, n, SEED); + let _ = T::Currency::make_free_balance_be(&user, balance); user } @@ -75,6 +88,26 @@ pub fn create_stash_controller( return Ok((stash, controller)) } +/// Create a stash and controller pair with fixed balance. 
+pub fn create_stash_controller_with_balance( + n: u32, + balance: crate::BalanceOf, + destination: RewardDestination, +) -> Result<(T::AccountId, T::AccountId), &'static str> { + let stash = create_funded_user_with_balance::("stash", n, balance); + let controller = create_funded_user_with_balance::("controller", n, balance); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); + + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup, + balance, + destination, + )?; + Ok((stash, controller)) +} + /// Create a stash and controller pair, where the controller is dead, and payouts go to controller. /// This is used to test worst case payout scenarios. pub fn create_stash_and_dead_controller( @@ -101,11 +134,20 @@ pub fn create_stash_and_dead_controller( pub fn create_validators( max: u32, balance_factor: u32, +) -> Result::Source>, &'static str> { + create_validators_with_seed::(max, balance_factor, 0) +} + +/// create `max` validators, with a seed to help unintentional prevent account collisions. +pub fn create_validators_with_seed( + max: u32, + balance_factor: u32, + seed: u32, ) -> Result::Source>, &'static str> { let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); for i in 0..max { let (stash, controller) = - create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; + create_stash_controller::(i + seed, balance_factor, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 97dfaa39c84a9..6f024eb1e6b04 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -18,12 +18,12 @@ //! Tests for the module. 
use super::{Event, *}; -use frame_election_provider_support::{ElectionProvider, Support}; +use frame_election_provider_support::{ElectionProvider, SortedListProvider, Support}; use frame_support::{ assert_noop, assert_ok, dispatch::WithPostDispatchInfo, pallet_prelude::*, - traits::{Currency, Get, OnInitialize, ReservableCurrency}, + traits::{Currency, Get, ReservableCurrency}, weights::{extract_actual_weight, GetDispatchInfo}, }; use mock::*; @@ -542,8 +542,8 @@ fn nominating_and_rewards_should_work() { total: 1000 + 800, own: 1000, others: vec![ - IndividualExposure { who: 3, value: 400 }, IndividualExposure { who: 1, value: 400 }, + IndividualExposure { who: 3, value: 400 }, ] }, ); @@ -553,8 +553,8 @@ fn nominating_and_rewards_should_work() { total: 1000 + 1200, own: 1000, others: vec![ - IndividualExposure { who: 3, value: 600 }, IndividualExposure { who: 1, value: 600 }, + IndividualExposure { who: 3, value: 600 }, ] }, ); @@ -1517,6 +1517,65 @@ fn rebond_is_fifo() { }) } +#[test] +fn rebond_emits_right_value_in_event() { + // When a user calls rebond with more than can be rebonded, things succeed, + // and the rebond event emits the actual value rebonded. + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Set payee to controller. avoids confusion + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // confirm that 10 is a normal validator and gets paid at the end of the era. + mock::start_active_era(1); + + // Unbond almost all of the funds in stash. 
+ Staking::unbond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![UnlockChunk { value: 900, era: 1 + 3 }], + claimed_rewards: vec![], + }) + ); + + // Re-bond less than the total + Staking::rebond(Origin::signed(10), 100).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 200, + unlocking: vec![UnlockChunk { value: 800, era: 1 + 3 }], + claimed_rewards: vec![], + }) + ); + // Event emitted should be correct + assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 100)); + + // Re-bond way more than available + Staking::rebond(Origin::signed(10), 100_000).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + // Event emitted should be correct, only 800 + assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 800)); + }); +} + #[test] fn reward_to_stake_works() { ExtBuilder::default() @@ -1907,8 +1966,8 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { assert_eq!( supports, vec![ - (21, Support { total: 1800, voters: vec![(21, 1000), (3, 400), (1, 400)] }), - (31, Support { total: 2200, voters: vec![(31, 1000), (3, 600), (1, 600)] }) + (21, Support { total: 1800, voters: vec![(21, 1000), (1, 400), (3, 400)] }), + (31, Support { total: 2200, voters: vec![(31, 1000), (1, 600), (3, 600)] }) ], ); }); @@ -1952,7 +2011,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { supports, vec![ (11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }), - (21, Support { total: 2500, voters: vec![(21, 1000), (3, 1000), (1, 500)] }) + (21, Support { total: 2500, voters: vec![(21, 1000), (1, 500), (3, 1000)] }) ], ); }); @@ -3881,11 +3940,137 @@ mod election_data_provider { } #[test] - fn respects_len_limits() { - 
ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::voters(Some(1)).unwrap_err(), "Voter snapshot too big"); - assert_eq!(Staking::targets(Some(1)).unwrap_err(), "Target snapshot too big"); - }); + fn respects_snapshot_len_limits() { + ExtBuilder::default() + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + // sum of all nominators who'd be voters (1), plus the self-votes (4). + assert_eq!( + ::SortedListProvider::count() + + >::iter().count() as u32, + 5 + ); + + // if limits is less.. + assert_eq!(Staking::voters(Some(1)).unwrap().len(), 1); + + // if limit is equal.. + assert_eq!(Staking::voters(Some(5)).unwrap().len(), 5); + + // if limit is more. + assert_eq!(Staking::voters(Some(55)).unwrap().len(), 5); + + // if target limit is more.. + assert_eq!(Staking::targets(Some(6)).unwrap().len(), 4); + assert_eq!(Staking::targets(Some(4)).unwrap().len(), 4); + + // if target limit is less, then we return an error. + assert_eq!(Staking::targets(Some(1)).unwrap_err(), "Target snapshot too big"); + }); + } + + #[test] + fn only_iterates_max_2_times_nominators_quota() { + ExtBuilder::default() + .nominate(true) // add nominator 101, who nominates [11, 21] + // the other nominators only nominate 21 + .add_staker(61, 60, 2_000, StakerStatus::::Nominator(vec![21])) + .add_staker(71, 70, 2_000, StakerStatus::::Nominator(vec![21])) + .add_staker(81, 80, 2_000, StakerStatus::::Nominator(vec![21])) + .build_and_execute(|| { + // given our nominators ordered by stake, + assert_eq!( + ::SortedListProvider::iter().collect::>(), + vec![61, 71, 81, 101] + ); + + // and total voters + assert_eq!( + ::SortedListProvider::count() + + >::iter().count() as u32, + 7 + ); + + // roll to session 5 + run_to_block(25); + + // slash 21, the only validator nominated by our first 3 nominators + add_slash(&21); + + // we take 4 voters: 2 validators and 2 nominators (so nominators quota = 2) + assert_eq!( + Staking::voters(Some(3)) + .unwrap() + .iter() + 
.map(|(stash, _, _)| stash) + .copied() + .collect::>(), + vec![31, 11], // 2 validators, but no nominators because we hit the quota + ); + }); + } + + // Even if some of the higher staked nominators are slashed, we still get up to max len voters + // by adding more lower staked nominators. In other words, we assert that we keep on adding + // valid nominators until we reach max len voters; which is opposed to simply stopping after we + // have iterated max len voters, but not adding all of them to voters due to some nominators not + // having valid targets. + #[test] + fn get_max_len_voters_even_if_some_nominators_are_slashed() { + ExtBuilder::default() + .nominate(true) // add nominator 101, who nominates [11, 21] + .add_staker(61, 60, 20, StakerStatus::::Nominator(vec![21])) + // 61 only nominates validator 21 ^^ + .add_staker(71, 70, 10, StakerStatus::::Nominator(vec![11, 21])) + .build_and_execute(|| { + // given our nominators ordered by stake, + assert_eq!( + ::SortedListProvider::iter().collect::>(), + vec![101, 61, 71] + ); + + // and total voters + assert_eq!( + ::SortedListProvider::count() + + >::iter().count() as u32, + 6 + ); + + // we take 5 voters + assert_eq!( + Staking::voters(Some(5)) + .unwrap() + .iter() + .map(|(stash, _, _)| stash) + .copied() + .collect::>(), + // then + vec![ + 31, 21, 11, // 3 nominators + 101, 61 // 2 validators, and 71 is excluded + ], + ); + + // roll to session 5 + run_to_block(25); + + // slash 21, the only validator nominated by 61 + add_slash(&21); + + // we take 4 voters + assert_eq!( + Staking::voters(Some(4)) + .unwrap() + .iter() + .map(|(stash, _, _)| stash) + .copied() + .collect::>(), + vec![ + 31, 11, // 2 validators (21 was slashed) + 101, 71 // 2 nominators, excluding 61 + ], + ); + }); } #[test] @@ -4232,3 +4417,28 @@ fn capped_stakers_works() { assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); }) } + +mod sorted_list_provider { + use super::*; + use 
frame_election_provider_support::SortedListProvider; + + #[test] + fn re_nominate_does_not_change_counters_or_list() { + ExtBuilder::default().nominate(true).build_and_execute(|| { + // given + let pre_insert_nominator_count = Nominators::::iter().count() as u32; + assert_eq!(::SortedListProvider::count(), pre_insert_nominator_count); + assert!(Nominators::::contains_key(101)); + assert_eq!(::SortedListProvider::iter().collect::>(), vec![101]); + + // when account 101 renominates + assert_ok!(Staking::nominate(Origin::signed(100), vec![41])); + + // then counts don't change + assert_eq!(::SortedListProvider::count(), pre_insert_nominator_count); + assert_eq!(Nominators::::iter().count() as u32, pre_insert_nominator_count); + // and the list is the same + assert_eq!(::SortedListProvider::iter().collect::>(), vec![101]); + }); + } +} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 0bcf179e29339..32c8dc80da158 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-18, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-09-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -85,37 +85,42 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (73_523_000 as Weight) + (73_865_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - (58_129_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + (114_296_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(7 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) - // Storage: Staking Validators (r:1 w:0) + // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - (61_542_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + (121_737_000 as Weight) + .saturating_add(T::DbWeight::get().reads(12 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (53_160_000 as Weight) + (51_631_000 as Weight) // Standard Error: 0 - .saturating_add((53_000 as Weight).saturating_mul(s as Weight)) + 
.saturating_add((55_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -124,36 +129,40 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) - // Storage: Staking SpanSlash (r:0 w:2) - fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (85_826_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_453_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + (101_870_000 as Weight) + .saturating_add(T::DbWeight::get().reads(13 as Weight)) + .saturating_add(T::DbWeight::get().writes(11 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:1) // Storage: Staking MaxValidatorsCount (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (34_936_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) + (69_092_000 as Weight) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (23_493_000 as Weight) - // Standard Error: 17_000 - .saturating_add((16_632_000 as Weight).saturating_mul(k as Weight)) + (21_468_000 as Weight) + // Standard Error: 19_000 + .saturating_add((16_415_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) @@ -165,84 +174,97 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn nominate(n: u32, ) -> Weight { - (41_733_000 as Weight) - // Standard Error: 11_000 - .saturating_add((5_840_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + (82_389_000 as Weight) + // Standard Error: 14_000 + .saturating_add((5_597_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(12 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 
w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (17_901_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) + (69_655_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - (13_760_000 as Weight) + (12_770_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (28_388_000 as Weight) + (27_756_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (2_537_000 as Weight) + (2_446_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - (2_749_000 as Weight) + (2_720_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (2_834_000 as Weight) + (2_711_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - (2_800_000 as Weight) + (2_796_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (3_429_000 as Weight) + (3_141_000 as Weight) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((53_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: 
Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (61_799_000 as Weight) + (97_394_000 as Weight) // Standard Error: 2_000 - .saturating_add((2_451_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add((2_370_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_383_988_000 as Weight) - // Standard Error: 223_000 - .saturating_add((19_981_000 as Weight).saturating_mul(s as Weight)) + (2_783_746_000 as Weight) + // Standard Error: 182_000 + .saturating_add((16_223_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -257,9 +279,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (124_714_000 as Weight) - // Standard Error: 23_000 - .saturating_add((47_575_000 as Weight).saturating_mul(n as Weight)) + (109_233_000 as Weight) + // Standard Error: 17_000 + .saturating_add((47_612_000 as Weight).saturating_mul(n as 
Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -277,9 +299,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (160_203_000 as Weight) - // Standard Error: 24_000 - .saturating_add((61_321_000 as Weight).saturating_mul(n as Weight)) + (177_392_000 as Weight) + // Standard Error: 20_000 + .saturating_add((60_771_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) @@ -288,12 +310,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - (49_593_000 as Weight) - // Standard Error: 3_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + (111_858_000 as Weight) + // Standard Error: 4_000 + .saturating_add((36_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(9 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -306,8 +331,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 71_000 - .saturating_add((35_237_000 as Weight).saturating_mul(e as Weight)) + // Standard 
Error: 68_000 + .saturating_add((33_495_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) @@ -315,19 +340,22 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:1) - // Storage: Staking Validators (r:1 w:1) - // Storage: Staking CounterForValidators (r:1 w:1) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - (72_484_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_452_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + (100_178_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_358_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking CounterForNominators (r:1 w:0) @@ -336,7 +364,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:101 w:0) // Storage: Staking Ledger (r:101 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Nominators (r:101 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:100 w:0) + // 
Storage: Staking Nominators (r:100 w:0) // Storage: Staking ValidatorCount (r:1 w:0) // Storage: Staking MinimumValidatorCount (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:1) @@ -348,39 +378,43 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 856_000 - .saturating_add((305_057_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 860_000 + .saturating_add((298_721_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 43_000 - .saturating_add((47_890_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(9 as Weight)) + .saturating_add((49_427_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(208 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } + // Storage: Staking CounterForNominators (r:1 w:0) + // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) // Storage: Staking SlashingSpans (r:21 w:0) - // Storage: Staking Nominators (r:1001 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:1000 w:0) + // Storage: Staking Nominators (r:1000 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 98_000 - .saturating_add((25_610_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 98_000 - .saturating_add((28_064_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_346_000 - 
.saturating_add((18_123_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) + // Standard Error: 91_000 + .saturating_add((26_605_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 91_000 + .saturating_add((31_481_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_122_000 + .saturating_add((16_672_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(204 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - (30_422_000 as Weight) - // Standard Error: 33_000 - .saturating_add((11_252_000 as Weight).saturating_mul(v as Weight)) + (0 as Weight) + // Standard Error: 34_000 + .saturating_add((10_558_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } @@ -390,20 +424,23 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_limits() -> Weight { - (6_486_000 as Weight) + (6_353_000 as Weight) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking ChillThreshold (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) - // Storage: Staking Validators (r:1 w:1) - // Storage: Staking MaxValidatorsCount (r:1 w:0) - // Storage: Staking CounterForValidators (r:1 w:1) - // Storage: Staking MinValidatorBond (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking MaxNominatorsCount (r:1 
w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: Staking MinNominatorBond (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - (58_222_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + (83_389_000 as Weight) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } } @@ -416,37 +453,42 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (73_523_000 as Weight) + (73_865_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - (58_129_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + (114_296_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(7 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) - // Storage: Staking Validators (r:1 w:0) + // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - (61_542_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as 
Weight)) + (121_737_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(12 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (53_160_000 as Weight) + (51_631_000 as Weight) // Standard Error: 0 - .saturating_add((53_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -455,36 +497,40 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) - // Storage: Staking SpanSlash (r:0 w:2) - fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (85_826_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_453_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + (101_870_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(13 as Weight)) + .saturating_add(RocksDbWeight::get().writes(11 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:1) // Storage: 
Staking MaxValidatorsCount (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (34_936_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + (69_092_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (23_493_000 as Weight) - // Standard Error: 17_000 - .saturating_add((16_632_000 as Weight).saturating_mul(k as Weight)) + (21_468_000 as Weight) + // Standard Error: 19_000 + .saturating_add((16_415_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) @@ -496,84 +542,97 @@ impl WeightInfo for () { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn nominate(n: u32, ) -> Weight { - (41_733_000 as Weight) - // Standard Error: 11_000 - .saturating_add((5_840_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + (82_389_000 as Weight) + // Standard Error: 14_000 + .saturating_add((5_597_000 as Weight).saturating_mul(n as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(12 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (17_901_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + (69_655_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - (13_760_000 as Weight) + (12_770_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (28_388_000 as Weight) + (27_756_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (2_537_000 as Weight) + (2_446_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - (2_749_000 as Weight) + (2_720_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (2_834_000 as Weight) + (2_711_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking ForceEra (r:0 w:1) 
fn force_new_era_always() -> Weight { - (2_800_000 as Weight) + (2_796_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (3_429_000 as Weight) + (3_141_000 as Weight) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((53_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (61_799_000 as Weight) + (97_394_000 as Weight) // Standard Error: 2_000 - .saturating_add((2_451_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add((2_370_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_383_988_000 as Weight) - // Standard Error: 223_000 - .saturating_add((19_981_000 as Weight).saturating_mul(s as Weight)) + (2_783_746_000 as Weight) + // Standard Error: 182_000 + .saturating_add((16_223_000 as Weight).saturating_mul(s 
as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -588,9 +647,9 @@ impl WeightInfo for () { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (124_714_000 as Weight) - // Standard Error: 23_000 - .saturating_add((47_575_000 as Weight).saturating_mul(n as Weight)) + (109_233_000 as Weight) + // Standard Error: 17_000 + .saturating_add((47_612_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -608,9 +667,9 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (160_203_000 as Weight) - // Standard Error: 24_000 - .saturating_add((61_321_000 as Weight).saturating_mul(n as Weight)) + (177_392_000 as Weight) + // Standard Error: 20_000 + .saturating_add((60_771_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) @@ -619,12 +678,15 @@ impl WeightInfo for () { // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + // Storage: BagsList ListNodes (r:3 w:3) + // Storage: Staking Bonded (r:1 w:0) + // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - (49_593_000 as Weight) - // Standard Error: 3_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + (111_858_000 as Weight) + // Standard Error: 4_000 + 
.saturating_add((36_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(9 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -637,8 +699,8 @@ impl WeightInfo for () { // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 71_000 - .saturating_add((35_237_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 68_000 + .saturating_add((33_495_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) @@ -646,19 +708,22 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:1) - // Storage: Staking Validators (r:1 w:1) - // Storage: Staking CounterForValidators (r:1 w:1) - // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - (72_484_000 as Weight) - // Standard Error: 2_000 - .saturating_add((2_452_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + (100_178_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_358_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(11 
as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking CounterForNominators (r:1 w:0) @@ -667,7 +732,9 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:101 w:0) // Storage: Staking Ledger (r:101 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Nominators (r:101 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:100 w:0) + // Storage: Staking Nominators (r:100 w:0) // Storage: Staking ValidatorCount (r:1 w:0) // Storage: Staking MinimumValidatorCount (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:1) @@ -679,39 +746,43 @@ impl WeightInfo for () { // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 856_000 - .saturating_add((305_057_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 860_000 + .saturating_add((298_721_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 43_000 - .saturating_add((47_890_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(9 as Weight)) + .saturating_add((49_427_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(208 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } + // Storage: Staking CounterForNominators (r:1 w:0) + // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) // Storage: Staking 
SlashingSpans (r:21 w:0) - // Storage: Staking Nominators (r:1001 w:0) + // Storage: BagsList ListBags (r:200 w:0) + // Storage: BagsList ListNodes (r:1000 w:0) + // Storage: Staking Nominators (r:1000 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 98_000 - .saturating_add((25_610_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 98_000 - .saturating_add((28_064_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_346_000 - .saturating_add((18_123_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + // Standard Error: 91_000 + .saturating_add((26_605_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 91_000 + .saturating_add((31_481_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_122_000 + .saturating_add((16_672_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(204 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - (30_422_000 as Weight) - // Standard Error: 33_000 - .saturating_add((11_252_000 as Weight).saturating_mul(v as Weight)) + (0 as Weight) + // Standard Error: 34_000 + .saturating_add((10_558_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } @@ -721,19 +792,22 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_limits() -> Weight { - 
(6_486_000 as Weight) + (6_353_000 as Weight) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking ChillThreshold (r:1 w:0) - // Storage: Staking Nominators (r:1 w:0) - // Storage: Staking Validators (r:1 w:1) - // Storage: Staking MaxValidatorsCount (r:1 w:0) - // Storage: Staking CounterForValidators (r:1 w:1) - // Storage: Staking MinValidatorBond (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking MaxNominatorsCount (r:1 w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: Staking MinNominatorBond (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: BagsList ListNodes (r:2 w:2) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - (58_222_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + (83_389_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index b09ed65a114dc..f4af38db54e2d 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -31,7 +31,7 @@ once_cell = { version = "1", default-features = false, optional = true } sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../../primitives/state-machine" } bitflags = "1.3" impl-trait-for-tuples = "0.2.1" -smallvec = "1.4.1" +smallvec = "1.7.0" log = { version = "0.4.14", default-features = false } [dev-dependencies] diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 8aacd8f0aa810..04bb2ead645d2 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -132,7 +132,7 @@ fn construct_runtime_parsed(definition: 
RuntimeDefinition) -> Result( } fn decl_pallet_runtime_setup( + runtime: &Ident, pallet_declarations: &[Pallet], scrate: &TokenStream2, ) -> TokenStream2 { - let names = pallet_declarations.iter().map(|d| &d.name); - let names2 = pallet_declarations.iter().map(|d| &d.name); + let names = pallet_declarations.iter().map(|d| &d.name).collect::>(); let name_strings = pallet_declarations.iter().map(|d| d.name.to_string()); + let module_names = pallet_declarations.iter().map(|d| d.path.module_name()); let indices = pallet_declarations.iter().map(|pallet| pallet.index as usize); + let pallet_structs = pallet_declarations + .iter() + .map(|pallet| { + let path = &pallet.path; + match pallet.instance.as_ref() { + Some(inst) => quote!(#path::Pallet<#runtime, #path::#inst>), + None => quote!(#path::Pallet<#runtime>), + } + }) + .collect::>(); quote!( /// Provides an implementation of `PalletInfo` to provide information @@ -264,13 +275,37 @@ fn decl_pallet_runtime_setup( fn name() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::

(); #( - if type_id == #scrate::sp_std::any::TypeId::of::<#names2>() { + if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#name_strings) } )* None } + + fn module_name() -> Option<&'static str> { + let type_id = #scrate::sp_std::any::TypeId::of::

(); + #( + if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { + return Some(#module_names) + } + )* + + None + } + + fn crate_version() -> Option<#scrate::traits::CrateVersion> { + let type_id = #scrate::sp_std::any::TypeId::of::

(); + #( + if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { + return Some( + <#pallet_structs as #scrate::traits::PalletInfoAccess>::crate_version() + ) + } + )* + + None + } } ) } diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 6f2fd82e73f4b..a0ec6dfa5803e 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -188,6 +188,18 @@ pub struct PalletPath { pub inner: Path, } +impl PalletPath { + pub fn module_name(&self) -> String { + self.inner.segments.iter().fold(String::new(), |mut acc, segment| { + if !acc.is_empty() { + acc.push_str("::"); + } + acc.push_str(&segment.ident.to_string()); + acc + }) + } +} + impl Parse for PalletPath { fn parse(input: ParseStream) -> Result { let mut lookahead = input.lookahead1(); diff --git a/frame/support/procedural/src/crate_version.rs b/frame/support/procedural/src/crate_version.rs new file mode 100644 index 0000000000000..cfa35c6190e15 --- /dev/null +++ b/frame/support/procedural/src/crate_version.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of macros related to crate versioning. 
+ +use super::get_cargo_env_var; +use frame_support_procedural_tools::generate_crate_access_2018; +use proc_macro2::{Span, TokenStream}; +use syn::{Error, Result}; + +/// Create an error that will be shown by rustc at the call site of the macro. +fn create_error(message: &str) -> Error { + Error::new(Span::call_site(), message) +} + +/// Implementation of the `crate_to_crate_version!` macro. +pub fn crate_to_crate_version(input: proc_macro::TokenStream) -> Result { + if !input.is_empty() { + return Err(create_error("No arguments expected!")) + } + + let major_version = get_cargo_env_var::("CARGO_PKG_VERSION_MAJOR") + .map_err(|_| create_error("Major version needs to fit into `u16`"))?; + + let minor_version = get_cargo_env_var::("CARGO_PKG_VERSION_MINOR") + .map_err(|_| create_error("Minor version needs to fit into `u8`"))?; + + let patch_version = get_cargo_env_var::("CARGO_PKG_VERSION_PATCH") + .map_err(|_| create_error("Patch version needs to fit into `u8`"))?; + + let crate_ = generate_crate_access_2018("frame-support")?; + + Ok(quote::quote! { + #crate_::traits::CrateVersion { + major: #major_version, + minor: #minor_version, + patch: #patch_version, + } + }) +} diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index a8ac022c35c6b..6987fc49b9a8c 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -21,6 +21,7 @@ mod clone_no_bound; mod construct_runtime; +mod crate_version; mod debug_no_bound; mod default_no_bound; mod dummy_part_checker; @@ -31,7 +32,7 @@ mod storage; mod transactional; use proc_macro::TokenStream; -use std::cell::RefCell; +use std::{cell::RefCell, str::FromStr}; pub(crate) use storage::INHERENT_INSTANCE_NAME; thread_local! { @@ -52,6 +53,16 @@ impl Counter { } } +/// Get the value from the given environment variable set by cargo. +/// +/// The value is parsed into the requested destination type. 
+fn get_cargo_env_var(version_env: &str) -> std::result::Result { + let version = std::env::var(version_env) + .unwrap_or_else(|_| panic!("`{}` is always set by cargo; qed", version_env)); + + T::from_str(&version).map_err(drop) +} + /// Declares strongly-typed wrappers around codec-compatible types in storage. /// /// ## Example @@ -462,6 +473,13 @@ pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStre .unwrap_or_else(|e| e.to_compile_error().into()) } +#[proc_macro] +pub fn crate_to_crate_version(input: TokenStream) -> TokenStream { + crate_version::crate_to_crate_version(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() +} + /// The number of module instances supported by the runtime, starting at index 1, /// and up to `NUMBER_OF_INSTANCE`. pub(crate) const NUMBER_OF_INSTANCE: u8 = 16; diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index a217742fec55d..ea601f138ea05 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -98,28 +98,39 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { ) }; - // Depending on the flag `generate_storage_info` we use partial or full storage info from - // storage. 
- let (storage_info_span, storage_info_trait, storage_info_method) = - if let Some(span) = def.pallet_struct.generate_storage_info { - ( - span, - quote::quote_spanned!(span => StorageInfoTrait), - quote::quote_spanned!(span => storage_info), - ) - } else { - let span = def.pallet_struct.attr_span; - ( - span, - quote::quote_spanned!(span => PartialStorageInfoTrait), - quote::quote_spanned!(span => partial_storage_info), - ) - }; + let storage_info_span = + def.pallet_struct.generate_storage_info.unwrap_or(def.pallet_struct.attr_span); let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); let storage_cfg_attrs = &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::>(); + // Depending on the flag `generate_storage_info` and the storage attribute `unbounded`, we use + // partial or full storage info from storage. + let storage_info_traits = &def + .storages + .iter() + .map(|storage| { + if storage.unbounded || def.pallet_struct.generate_storage_info.is_none() { + quote::quote_spanned!(storage_info_span => PartialStorageInfoTrait) + } else { + quote::quote_spanned!(storage_info_span => StorageInfoTrait) + } + }) + .collect::>(); + + let storage_info_methods = &def + .storages + .iter() + .map(|storage| { + if storage.unbounded || def.pallet_struct.generate_storage_info.is_none() { + quote::quote_spanned!(storage_info_span => partial_storage_info) + } else { + quote::quote_spanned!(storage_info_span => storage_info) + } + }) + .collect::>(); + let storage_info = quote::quote_spanned!(storage_info_span => impl<#type_impl_gen> #frame_support::traits::StorageInfoTrait for #pallet_ident<#type_use_gen> @@ -136,8 +147,8 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { { let mut storage_info = < #storage_names<#type_use_gen> - as #frame_support::traits::#storage_info_trait - >::#storage_info_method(); + as #frame_support::traits::#storage_info_traits + >::#storage_info_methods(); res.append(&mut 
storage_info); } )* @@ -208,6 +219,18 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { .expect("Pallet is part of the runtime because pallet `Config` trait is \ implemented by the runtime") } + + fn module_name() -> &'static str { + < + ::PalletInfo as #frame_support::traits::PalletInfo + >::module_name::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + + fn crate_version() -> #frame_support::traits::CrateVersion { + #frame_support::crate_to_crate_version!() + } } #storage_info diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 0f7133f10dd47..a4f030722f1c1 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -19,27 +19,68 @@ use crate::pallet::{ parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, Def, }; -use std::collections::HashSet; +use std::collections::HashMap; -/// Generate the prefix_ident related the the storage. +/// Generate the prefix_ident related to the storage. /// prefix_ident is used for the prefix struct to be given to storage as first generic param. fn prefix_ident(storage: &StorageDef) -> syn::Ident { let storage_ident = &storage.ident; syn::Ident::new(&format!("_GeneratedPrefixForStorage{}", storage_ident), storage_ident.span()) } +/// Generate the counter_prefix_ident related to the storage. +/// counter_prefix_ident is used for the prefix struct to be given to counted storage map. +fn counter_prefix_ident(storage_ident: &syn::Ident) -> syn::Ident { + syn::Ident::new( + &format!("_GeneratedCounterPrefixForStorage{}", storage_ident), + storage_ident.span(), + ) +} + +/// Generate the counter_prefix related to the storage. +/// counter_prefix is used by counted storage map. 
+fn counter_prefix(prefix: &str) -> String { + format!("CounterFor{}", prefix) +} + /// Check for duplicated storage prefixes. This step is necessary since users can specify an /// alternative storage prefix using the #[pallet::storage_prefix] syntax, and we need to ensure /// that the prefix specified by the user is not a duplicate of an existing one. -fn check_prefix_duplicates(storage_def: &StorageDef, set: &mut HashSet) -> syn::Result<()> { +fn check_prefix_duplicates( + storage_def: &StorageDef, + // A hashmap of all already used prefix and their associated error if duplication + used_prefixes: &mut HashMap, +) -> syn::Result<()> { let prefix = storage_def.prefix(); + let dup_err = syn::Error::new( + storage_def.prefix_span(), + format!("Duplicate storage prefixes found for `{}`", prefix), + ); + + if let Some(other_dup_err) = used_prefixes.insert(prefix.clone(), dup_err.clone()) { + let mut err = dup_err; + err.combine(other_dup_err); + return Err(err) + } - if !set.insert(prefix.clone()) { - let err = syn::Error::new( + if let Metadata::CountedMap { .. 
} = storage_def.metadata { + let counter_prefix = counter_prefix(&prefix); + let counter_dup_err = syn::Error::new( storage_def.prefix_span(), - format!("Duplicate storage prefixes found for `{}`", prefix), + format!( + "Duplicate storage prefixes found for `{}`, used for counter associated to \ + counted storage map", + counter_prefix, + ), ); - return Err(err) + + if let Some(other_dup_err) = + used_prefixes.insert(counter_prefix.clone(), counter_dup_err.clone()) + { + let mut err = counter_dup_err; + err.combine(other_dup_err); + return Err(err) + } } Ok(()) @@ -51,11 +92,8 @@ fn check_prefix_duplicates(storage_def: &StorageDef, set: &mut HashSet) /// * Add `#[allow(type_alias_bounds)]` pub fn process_generics(def: &mut Def) -> syn::Result<()> { let frame_support = &def.frame_support; - let mut prefix_set = HashSet::new(); for storage_def in def.storages.iter_mut() { - check_prefix_duplicates(storage_def, &mut prefix_set)?; - let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; let typ_item = match item { @@ -109,6 +147,24 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); }, + StorageGenerics::CountedMap { + hasher, + key, + value, + query_kind, + on_empty, + max_values, + } => { + args.args.push(syn::GenericArgument::Type(hasher)); + args.args.push(syn::GenericArgument::Type(key)); + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(query_kind)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + args.args.push(syn::GenericArgument::Type(on_empty)); + let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); + args.args.push(syn::GenericArgument::Type(max_values)); + }, StorageGenerics::DoubleMap { hasher1, key1, @@ -162,11 
+218,22 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { return e.into_compile_error().into() } + // Check for duplicate prefixes + let mut prefix_set = HashMap::new(); + let mut errors = def + .storages + .iter() + .filter_map(|storage_def| check_prefix_duplicates(storage_def, &mut prefix_set).err()); + if let Some(mut final_error) = errors.next() { + errors.for_each(|error| final_error.combine(error)); + return final_error.into_compile_error() + } + let frame_support = &def.frame_support; let frame_system = &def.frame_system; let pallet_ident = &def.pallet_struct.pallet; - let entries = def.storages.iter().map(|storage| { + let entries_builder = def.storages.iter().map(|storage| { let docs = &storage.docs; let ident = &storage.ident; @@ -176,14 +243,14 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let cfg_attrs = &storage.cfg_attrs; quote::quote_spanned!(storage.attr_span => - #(#cfg_attrs)* #frame_support::metadata::StorageEntryMetadata { - name: <#full_ident as #frame_support::storage::StorageEntryMetadata>::NAME, - modifier: <#full_ident as #frame_support::storage::StorageEntryMetadata>::MODIFIER, - ty: <#full_ident as #frame_support::storage::StorageEntryMetadata>::ty(), - default: <#full_ident as #frame_support::storage::StorageEntryMetadata>::default(), - docs: #frame_support::sp_std::vec![ - #( #docs, )* - ], + #(#cfg_attrs)* + { + <#full_ident as #frame_support::storage::StorageEntryMetadataBuilder>::build_metadata( + #frame_support::sp_std::vec![ + #( #docs, )* + ], + &mut entries, + ); } ) }); @@ -246,6 +313,27 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { } ) }, + Metadata::CountedMap { key, value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + 
#(#cfg_attrs)* + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter(k: KArg) -> #query where + KArg: #frame_support::codec::EncodeLike<#key>, + { + // NOTE: we can't use any trait here because CountedStorageMap + // doesn't implement any. + <#full_ident>::get(k) + } + } + ) + }, Metadata::DoubleMap { key1, key2, value } => { let query = match storage.query_kind.as_ref().expect("Checked by def") { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => @@ -311,7 +399,44 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { let cfg_attrs = &storage_def.cfg_attrs; + let maybe_counter = if let Metadata::CountedMap { .. } = storage_def.metadata { + let counter_prefix_struct_ident = counter_prefix_ident(&storage_def.ident); + let counter_prefix_struct_const = counter_prefix(&prefix_struct_const); + + quote::quote_spanned!(storage_def.attr_span => + #(#cfg_attrs)* + #prefix_struct_vis struct #counter_prefix_struct_ident<#type_use_gen>( + core::marker::PhantomData<(#type_use_gen,)> + ); + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::traits::StorageInstance + for #counter_prefix_struct_ident<#type_use_gen> + #config_where_clause + { + fn pallet_prefix() -> &'static str { + < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::name::>() + .expect("Every active pallet has a name in the runtime; qed") + } + const STORAGE_PREFIX: &'static str = #counter_prefix_struct_const; + } + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::storage::types::CountedStorageMapInstance + for #prefix_struct_ident<#type_use_gen> + #config_where_clause + { + type CounterPrefix = #counter_prefix_struct_ident<#type_use_gen>; + } + ) + } else { + proc_macro2::TokenStream::default() + }; + quote::quote_spanned!(storage_def.attr_span => + #maybe_counter + #(#cfg_attrs)* #prefix_struct_vis struct #prefix_struct_ident<#type_use_gen>( core::marker::PhantomData<(#type_use_gen,)> @@ -351,9 +476,12 @@ 
pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::traits::PalletInfo >::name::<#pallet_ident<#type_use_gen>>() .expect("Every active pallet has a name in the runtime; qed"), - entries: #frame_support::sp_std::vec![ - #( #entries, )* - ], + entries: { + #[allow(unused_mut)] + let mut entries = #frame_support::sp_std::vec![]; + #( #entries_builder )* + entries + }, } } } diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index e58b5d2048863..cd29baf93d849 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -27,6 +27,7 @@ mod keyword { syn::custom_keyword!(pallet); syn::custom_keyword!(getter); syn::custom_keyword!(storage_prefix); + syn::custom_keyword!(unbounded); syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ValueQuery); } @@ -34,15 +35,17 @@ mod keyword { /// Parse for one of the following: /// * `#[pallet::getter(fn dummy)]` /// * `#[pallet::storage_prefix = "CustomName"]` +/// * `#[pallet::unbounded]` pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), + Unbounded(proc_macro2::Span), } impl PalletStorageAttr { fn attr_span(&self) -> proc_macro2::Span { match self { - Self::Getter(_, span) | Self::StorageName(_, span) => *span, + Self::Getter(_, span) | Self::StorageName(_, span) | Self::Unbounded(span) => *span, } } } @@ -76,16 +79,50 @@ impl syn::parse::Parse for PalletStorageAttr { })?; Ok(Self::StorageName(renamed_prefix, attr_span)) + } else if lookahead.peek(keyword::unbounded) { + content.parse::()?; + + Ok(Self::Unbounded(attr_span)) } else { Err(lookahead.error()) } } } +struct PalletStorageAttrInfo { + getter: Option, + rename_as: Option, + unbounded: bool, +} + +impl PalletStorageAttrInfo { + fn from_attrs(attrs: Vec) -> syn::Result { + let mut getter = None; + let mut rename_as = None; + let mut unbounded 
= false; + for attr in attrs { + match attr { + PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), + PalletStorageAttr::StorageName(name, ..) if rename_as.is_none() => + rename_as = Some(name), + PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, + attr => + return Err(syn::Error::new( + attr.attr_span(), + "Invalid attribute: Duplicate attribute", + )), + } + } + + Ok(PalletStorageAttrInfo { getter, rename_as, unbounded }) + } +} + /// The value and key types used by storages. Needed to expand metadata. pub enum Metadata { Value { value: syn::Type }, Map { value: syn::Type, key: syn::Type }, + CountedMap { value: syn::Type, key: syn::Type }, DoubleMap { value: syn::Type, key1: syn::Type, key2: syn::Type }, NMap { keys: Vec, keygen: syn::Type, value: syn::Type }, } @@ -130,6 +167,8 @@ pub struct StorageDef { /// generics of the storage. /// If generics are not named, this is none. pub named_generics: Option, + /// If the value stored in this storage is unbounded. + pub unbounded: bool, } /// The parsed generic from the @@ -153,6 +192,14 @@ pub enum StorageGenerics { on_empty: Option, max_values: Option, }, + CountedMap { + hasher: syn::Type, + key: syn::Type, + value: syn::Type, + query_kind: Option, + on_empty: Option, + max_values: Option, + }, Value { value: syn::Type, query_kind: Option, @@ -173,6 +220,7 @@ impl StorageGenerics { let res = match self.clone() { Self::DoubleMap { value, key1, key2, .. } => Metadata::DoubleMap { value, key1, key2 }, Self::Map { value, key, .. } => Metadata::Map { value, key }, + Self::CountedMap { value, key, .. } => Metadata::CountedMap { value, key }, Self::Value { value, .. } => Metadata::Value { value }, Self::NMap { keygen, value, .. } => Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value }, @@ -186,6 +234,7 @@ impl StorageGenerics { match &self { Self::DoubleMap { query_kind, .. } | Self::Map { query_kind, .. } | + Self::CountedMap { query_kind, .. 
} | Self::Value { query_kind, .. } | Self::NMap { query_kind, .. } => query_kind.clone(), } @@ -195,6 +244,7 @@ impl StorageGenerics { enum StorageKind { Value, Map, + CountedMap, DoubleMap, NMap, } @@ -324,6 +374,33 @@ fn process_named_generics( max_values: parsed.remove("MaxValues").map(|binding| binding.ty), } }, + StorageKind::CountedMap => { + check_generics( + &parsed, + &["Hasher", "Key", "Value"], + &["QueryKind", "OnEmpty", "MaxValues"], + "CountedStorageMap", + args_span, + )?; + + StorageGenerics::CountedMap { + hasher: parsed + .remove("Hasher") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + key: parsed + .remove("Key") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), + max_values: parsed.remove("MaxValues").map(|binding| binding.ty), + } + }, StorageKind::DoubleMap => { check_generics( &parsed, @@ -425,6 +502,11 @@ fn process_unnamed_generics( Metadata::Map { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, retrieve_arg(4).ok(), ), + StorageKind::CountedMap => ( + None, + Metadata::CountedMap { key: retrieve_arg(2)?, value: retrieve_arg(3)? 
}, + retrieve_arg(4).ok(), + ), StorageKind::DoubleMap => ( None, Metadata::DoubleMap { @@ -451,6 +533,7 @@ fn process_generics( let storage_kind = match &*segment.ident.to_string() { "StorageValue" => StorageKind::Value, "StorageMap" => StorageKind::Map, + "CountedStorageMap" => StorageKind::CountedMap, "StorageDoubleMap" => StorageKind::DoubleMap, "StorageNMap" => StorageKind::NMap, found => { @@ -584,25 +667,8 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let (mut getters, mut names) = attrs - .into_iter() - .partition::, _>(|attr| matches!(attr, PalletStorageAttr::Getter(..))); - if getters.len() > 1 { - let msg = "Invalid pallet::storage, multiple argument pallet::getter found"; - return Err(syn::Error::new(getters[1].attr_span(), msg)) - } - if names.len() > 1 { - let msg = "Invalid pallet::storage, multiple argument pallet::storage_prefix found"; - return Err(syn::Error::new(names[1].attr_span(), msg)) - } - let getter = getters.pop().map(|attr| match attr { - PalletStorageAttr::Getter(ident, _) => ident, - _ => unreachable!(), - }); - let rename_as = names.pop().map(|attr| match attr { - PalletStorageAttr::StorageName(lit, _) => lit, - _ => unreachable!(), - }); + let PalletStorageAttrInfo { getter, rename_as, unbounded } = + PalletStorageAttrInfo::from_attrs(attrs)?; let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); @@ -659,6 +725,7 @@ impl StorageDef { where_clause, cfg_attrs, named_generics, + unbounded, }) } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 2e6777fee2af2..b4e9071e361aa 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2151,6 +2151,18 @@ macro_rules! 
decl_module { .expect("Pallet is part of the runtime because pallet `Config` trait is \ implemented by the runtime") } + + fn module_name() -> &'static str { + < + <$trait_instance as $system::Config>::PalletInfo as $crate::traits::PalletInfo + >::module_name::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + + fn crate_version() -> $crate::traits::CrateVersion { + $crate::crate_to_crate_version!() + } } // Implement GetCallName for the Call. @@ -2529,8 +2541,8 @@ mod tests { use crate::{ metadata::*, traits::{ - Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, - PalletInfo, + CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, + OnRuntimeUpgrade, PalletInfo, }, weights::{DispatchClass, DispatchInfo, Pays, RuntimeDbWeight}, }; @@ -2631,6 +2643,22 @@ mod tests { return Some("Test") } + None + } + fn module_name() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::() { + return Some("tests") + } + + None + } + fn crate_version() -> Option { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::() { + return Some(frame_support::crate_to_crate_version!()) + } + None } } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index cce03f1e8ce6c..f3b00c764bb35 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -702,6 +702,21 @@ pub use frame_support_procedural::DefaultNoBound; /// ``` pub use frame_support_procedural::require_transactional; +/// Convert the current crate version into a [`CrateVersion`](crate::traits::CrateVersion). +/// +/// It uses the `CARGO_PKG_VERSION_MAJOR`, `CARGO_PKG_VERSION_MINOR` and +/// `CARGO_PKG_VERSION_PATCH` environment variables to fetch the crate version. +/// This means that the [`CrateVersion`](crate::traits::CrateVersion) +/// object will correspond to the version of the crate the macro is called in! +/// +/// # Example +/// +/// ``` +/// # use frame_support::{traits::CrateVersion, crate_to_crate_version}; +/// const Version: CrateVersion = crate_to_crate_version!(); +/// ``` +pub use frame_support_procedural::crate_to_crate_version; + /// Return Err of the expression: `return Err($expression);`. /// /// Used as `fail!(expression)`. 
@@ -819,6 +834,7 @@ pub mod tests { StorageHasher, }; use codec::{Codec, EncodeLike}; + use frame_support::traits::CrateVersion; use sp_io::TestExternalities; use sp_std::result; @@ -832,6 +848,12 @@ pub mod tests { fn name() -> Option<&'static str> { unimplemented!("PanicPalletInfo mustn't be triggered by tests"); } + fn module_name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn crate_version() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } } pub trait Config: 'static { @@ -1293,8 +1315,8 @@ pub mod pallet_prelude { storage::{ bounded_vec::BoundedVec, types::{ - Key as NMapKey, OptionQuery, StorageDoubleMap, StorageMap, StorageNMap, - StorageValue, ValueQuery, + CountedStorageMap, Key as NMapKey, OptionQuery, StorageDoubleMap, StorageMap, + StorageNMap, StorageValue, ValueQuery, }, }, traits::{ @@ -1411,15 +1433,17 @@ pub mod pallet_prelude { /// `::Foo`. /// /// To generate the full storage info (used for PoV calculation) use the attribute -/// `#[pallet::set_storage_max_encoded_len]`, e.g.: +/// `#[pallet::generate_storage_info]`, e.g.: /// ```ignore /// #[pallet::pallet] -/// #[pallet::set_storage_max_encoded_len] +/// #[pallet::generate_storage_info] /// pub struct Pallet(_); /// ``` /// /// This require all storage to implement the trait [`traits::StorageInfoTrait`], thus all keys /// and value types must bound [`pallet_prelude::MaxEncodedLen`]. +/// Some individual storage can opt-out from this constraint by using `#[pallet::unbounded]`, +/// see `#[pallet::storage]` documentation. /// /// As the macro implements [`traits::GetStorageVersion`], the current storage version needs to /// be communicated to the macro. 
This can be done by using the `storage_version` attribute: @@ -1673,6 +1697,8 @@ pub mod pallet_prelude { /// * [`pallet_prelude::StorageValue`] expect `Value` and optionally `QueryKind` and `OnEmpty`, /// * [`pallet_prelude::StorageMap`] expect `Hasher`, `Key`, `Value` and optionally `QueryKind` /// and `OnEmpty`, +/// * [`pallet_prelude::CountedStorageMap`] expect `Hasher`, `Key`, `Value` and optionally +/// `QueryKind` and `OnEmpty`, /// * [`pallet_prelude::StorageDoubleMap`] expect `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` /// and optionally `QueryKind` and `OnEmpty`. /// @@ -1684,13 +1710,16 @@ pub mod pallet_prelude { /// E.g. if runtime names the pallet "MyExample" then the storage `type Foo = ...` use the /// prefix: `Twox128(b"MyExample") ++ Twox128(b"Foo")`. /// -/// The optional attribute `#[pallet::storage_prefix = "$custom_name"]` allows to define a -/// specific name to use for the prefix. +/// For the `CountedStorageMap` variant, the Prefix also implements +/// `CountedStorageMapInstance`. It associate a `CounterPrefix`, which is implemented same as +/// above, but the storage prefix is prepend with `"CounterFor"`. +/// E.g. if runtime names the pallet "MyExample" then the storage +/// `type Foo = CountedStorageaMap<...>` will store its counter at the prefix: +/// `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. /// /// E.g: /// ```ignore /// #[pallet::storage] -/// #[pallet::storage_prefix = "OtherName"] /// pub(super) type MyStorage = StorageMap; /// ``` /// In this case the final prefix used by the map is @@ -1699,9 +1728,13 @@ pub mod pallet_prelude { /// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows to define a /// getter function on `Pallet`. /// +/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allow to define the storage +/// prefix to use, see how `Prefix` generic is implemented above. 
+/// /// E.g: /// ```ignore /// #[pallet::storage] +/// #[pallet::storage_prefix = "foo"] /// #[pallet::getter(fn my_storage)] /// pub(super) type MyStorage = StorageMap; /// ``` @@ -1712,6 +1745,11 @@ pub mod pallet_prelude { /// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; /// ``` /// +/// The optional attribute `#[pallet::unbounded]` allows to declare the storage as unbounded. +/// When implementating the storage info (when #[pallet::generate_storage_info]` is specified +/// on the pallet struct placeholder), the size of the storage will be declared as unbounded. +/// This can be useful for storage which can never go into PoV (Proof of Validity). +/// /// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. /// /// E.g: @@ -1738,6 +1776,8 @@ pub mod pallet_prelude { /// `_GeneratedPrefixForStorage$NameOfStorage`, and implements /// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It /// then uses it as the first generic of the aliased type. +/// For `CountedStorageMap`, `CountedStorageMapInstance` is implemented, and another similar +/// struct is generated. /// /// For named generic, the macro will reorder the generics, and remove the names. 
/// diff --git a/frame/support/src/storage/bounded_btree_map.rs b/frame/support/src/storage/bounded_btree_map.rs index d0c0aa7c4f155..404814cb81693 100644 --- a/frame/support/src/storage/bounded_btree_map.rs +++ b/frame/support/src/storage/bounded_btree_map.rs @@ -20,7 +20,7 @@ use crate::{storage::StorageDecodeLength, traits::Get}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{ - borrow::Borrow, collections::btree_map::BTreeMap, convert::TryFrom, fmt, marker::PhantomData, + borrow::Borrow, collections::btree_map::BTreeMap, convert::TryFrom, marker::PhantomData, ops::Deref, }; @@ -173,12 +173,12 @@ where } #[cfg(feature = "std")] -impl fmt::Debug for BoundedBTreeMap +impl std::fmt::Debug for BoundedBTreeMap where - BTreeMap: fmt::Debug, + BTreeMap: std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("BoundedBTreeMap").field(&self.0).field(&Self::bound()).finish() } } diff --git a/frame/support/src/storage/bounded_btree_set.rs b/frame/support/src/storage/bounded_btree_set.rs index 182884e655dd2..f74ff12854a58 100644 --- a/frame/support/src/storage/bounded_btree_set.rs +++ b/frame/support/src/storage/bounded_btree_set.rs @@ -20,7 +20,7 @@ use crate::{storage::StorageDecodeLength, traits::Get}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{ - borrow::Borrow, collections::btree_set::BTreeSet, convert::TryFrom, fmt, marker::PhantomData, + borrow::Borrow, collections::btree_set::BTreeSet, convert::TryFrom, marker::PhantomData, ops::Deref, }; @@ -157,12 +157,12 @@ where } #[cfg(feature = "std")] -impl fmt::Debug for BoundedBTreeSet +impl std::fmt::Debug for BoundedBTreeSet where - BTreeSet: fmt::Debug, + BTreeSet: std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 
f.debug_tuple("BoundedBTreeSet").field(&self.0).field(&Self::bound()).finish() } } diff --git a/frame/support/src/storage/bounded_vec.rs b/frame/support/src/storage/bounded_vec.rs index b45c294f8d4a4..f353127969c5b 100644 --- a/frame/support/src/storage/bounded_vec.rs +++ b/frame/support/src/storage/bounded_vec.rs @@ -28,7 +28,7 @@ use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; +use sp_std::{convert::TryFrom, marker::PhantomData, prelude::*}; /// A bounded vector. /// @@ -200,13 +200,12 @@ impl Default for BoundedVec { } } -#[cfg(feature = "std")] -impl fmt::Debug for BoundedVec +impl sp_std::fmt::Debug for BoundedVec where - T: fmt::Debug, + T: sp_std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { f.debug_tuple("BoundedVec").field(&self.0).field(&Self::bound()).finish() } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index d28e42028de53..636a10feb1ab3 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -219,6 +219,7 @@ where previous_key: prefix, drain: false, closure: |_raw_key, mut raw_value| V::decode(&mut raw_value), + phantom: Default::default(), } } @@ -345,6 +346,7 @@ where let mut key_material = G::Hasher2::reverse(raw_key_without_prefix); Ok((K2::decode(&mut key_material)?, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), } } @@ -398,6 +400,7 @@ where let k2 = K2::decode(&mut k2_material)?; Ok((k1, k2, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), } } diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index 3fd3b9a0ea7b8..1a4225173c4ae 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs 
@@ -138,6 +138,7 @@ where let mut key_material = G::Hasher::reverse(raw_key_without_prefix); Ok((K::decode(&mut key_material)?, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), } } diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index 592bcc81341bf..4845673d3d8c2 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -196,6 +196,7 @@ where previous_key: prefix, drain: false, closure: |_raw_key, mut raw_value| V::decode(&mut raw_value), + phantom: Default::default(), } } @@ -305,6 +306,7 @@ impl> let partial_key = K::decode_partial_key(raw_key_without_prefix)?; Ok((partial_key, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), } } @@ -368,6 +370,7 @@ impl> let (final_key, _) = K::decode_final_key(raw_key_without_prefix)?; Ok((final_key, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), } } diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index eae45b1e96ad0..59422a282aab5 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -186,7 +186,7 @@ pub fn storage_iter_with_suffix( Ok((raw_key_without_prefix.to_vec(), value)) }; - PrefixIterator { prefix, previous_key, drain: false, closure } + PrefixIterator { prefix, previous_key, drain: false, closure, phantom: Default::default() } } /// Construct iterator to iterate over map items in `module` for the map called `item`. @@ -219,7 +219,7 @@ pub fn storage_key_iter_with_suffix< let value = T::decode(&mut &raw_value[..])?; Ok((key, value)) }; - PrefixIterator { prefix, previous_key, drain: false, closure } + PrefixIterator { prefix, previous_key, drain: false, closure, phantom: Default::default() } } /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. 
@@ -344,11 +344,12 @@ pub fn move_prefix(from_prefix: &[u8], to_prefix: &[u8]) { return } - let iter = PrefixIterator { + let iter = PrefixIterator::<_> { prefix: from_prefix.to_vec(), previous_key: from_prefix.to_vec(), drain: true, closure: |key, value| Ok((key.to_vec(), value.to_vec())), + phantom: Default::default(), }; for (key, value) in iter { diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index e57a876bf9831..35552e08fef1e 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -17,7 +17,7 @@ //! Stuff to do with the runtime's storage. -pub use self::types::StorageEntryMetadata; +pub use self::types::StorageEntryMetadataBuilder; use crate::{ hash::{ReversibleStorageHasher, StorageHasher}, storage::types::{ @@ -786,10 +786,12 @@ pub trait StorageNMap { KArg: EncodeLikeTuple + TupleToEncodedIter; } -/// Iterate over a prefix and decode raw_key and raw_value into `T`. +/// Iterate or drain over a prefix and decode raw_key and raw_value into `T`. /// /// If any decoding fails it skips it and continues to the next key. -pub struct PrefixIterator { +/// +/// If draining, then the hook `OnRemoval::on_removal` is called after each removal. +pub struct PrefixIterator { prefix: Vec, previous_key: Vec, /// If true then value are removed while iterating @@ -797,9 +799,21 @@ pub struct PrefixIterator { /// Function that take `(raw_key_without_prefix, raw_value)` and decode `T`. /// `raw_key_without_prefix` is the raw storage key without the prefix iterated on. closure: fn(&[u8], &[u8]) -> Result, + phantom: core::marker::PhantomData, +} + +/// Trait for specialising on removal logic of [`PrefixIterator`]. +pub trait PrefixIteratorOnRemoval { + /// This function is called whenever a key/value is removed. + fn on_removal(key: &[u8], value: &[u8]); +} + +/// No-op implementation. 
+impl PrefixIteratorOnRemoval for () { + fn on_removal(_key: &[u8], _value: &[u8]) {} } -impl PrefixIterator { +impl PrefixIterator { /// Creates a new `PrefixIterator`, iterating after `previous_key` and filtering out keys that /// are not prefixed with `prefix`. /// @@ -813,7 +827,13 @@ impl PrefixIterator { previous_key: Vec, decode_fn: fn(&[u8], &[u8]) -> Result, ) -> Self { - PrefixIterator { prefix, previous_key, drain: false, closure: decode_fn } + PrefixIterator { + prefix, + previous_key, + drain: false, + closure: decode_fn, + phantom: Default::default(), + } } /// Get the last key that has been iterated upon and return it. @@ -838,7 +858,7 @@ impl PrefixIterator { } } -impl Iterator for PrefixIterator { +impl Iterator for PrefixIterator { type Item = T; fn next(&mut self) -> Option { @@ -859,7 +879,8 @@ impl Iterator for PrefixIterator { }, }; if self.drain { - unhashed::kill(&self.previous_key) + unhashed::kill(&self.previous_key); + OnRemoval::on_removal(&self.previous_key, &raw_value); } let raw_key_without_prefix = &self.previous_key[self.prefix.len()..]; let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) { @@ -1119,7 +1140,7 @@ pub trait StoragePrefixedMap { /// Iter over all value of the storage. /// - /// NOTE: If a value failed to decode becaues storage is corrupted then it is skipped. + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. 
fn iter_values() -> PrefixIterator { let prefix = Self::final_prefix(); PrefixIterator { @@ -1127,6 +1148,7 @@ pub trait StoragePrefixedMap { previous_key: prefix.to_vec(), drain: false, closure: |_raw_key, mut raw_value| Value::decode(&mut raw_value), + phantom: Default::default(), } } @@ -1613,7 +1635,7 @@ mod test { assert_eq!(final_vec, vec![1, 2, 3, 4, 5]); - let mut iter = PrefixIterator::new( + let mut iter = PrefixIterator::<_>::new( iter.prefix().to_vec(), stored_key, |mut raw_key_without_prefix, mut raw_value| { diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs new file mode 100644 index 0000000000000..0860a4ed541c6 --- /dev/null +++ b/frame/support/src/storage/types/counted_map.rs @@ -0,0 +1,1040 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage counted map type. 
+ +use crate::{ + metadata::StorageEntryMetadata, + storage::{ + generator::StorageMap as _, + types::{ + OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder, StorageMap, StorageValue, + ValueQuery, + }, + StorageAppend, StorageDecodeLength, StorageTryAppend, + }, + traits::{Get, GetDefault, StorageInfo, StorageInfoTrait, StorageInstance}, + Never, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref}; +use sp_runtime::traits::Saturating; +use sp_std::prelude::*; + +/// A wrapper around a `StorageMap` and a `StorageValue` to keep track of how many items +/// are in a map, without needing to iterate all the values. +/// +/// This storage item has additional storage read and write overhead when manipulating values +/// compared to a regular storage map. +/// +/// For functions where we only add or remove a value, a single storage read is needed to check if +/// that value already exists. For mutate functions, two storage reads are used to check if the +/// value existed before and after the mutation. +/// +/// Whenever the counter needs to be updated, an additional read and write occurs to update that +/// counter. +pub struct CountedStorageMap< + Prefix, + Hasher, + Key, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>(core::marker::PhantomData<(Prefix, Hasher, Key, Value, QueryKind, OnEmpty, MaxValues)>); + +/// The requirement for an instance of [`CountedStorageMap`]. +pub trait CountedStorageMapInstance: StorageInstance { + /// The prefix to use for the counter storage value. + type CounterPrefix: StorageInstance; +} + +// Private helper trait to access map from counted storage map. +trait MapWrapper { + type Map; +} + +impl MapWrapper + for CountedStorageMap +{ + type Map = StorageMap; +} + +type CounterFor

= StorageValue<

::CounterPrefix, u32, ValueQuery>; + +/// On removal logic for updating counter while draining upon some prefix with +/// [`crate::storage::PrefixIterator`]. +pub struct OnRemovalCounterUpdate(core::marker::PhantomData); + +impl crate::storage::PrefixIteratorOnRemoval + for OnRemovalCounterUpdate +{ + fn on_removal(_key: &[u8], _value: &[u8]) { + CounterFor::::mutate(|value| value.saturating_dec()); + } +} + +impl + CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Get the storage key used to fetch a value corresponding to a specific key. + pub fn hashed_key_for>(key: KeyArg) -> Vec { + ::Map::hashed_key_for(key) + } + + /// Does the value (explicitly) exist in storage? + pub fn contains_key>(key: KeyArg) -> bool { + ::Map::contains_key(key) + } + + /// Load the value associated with the given key from the map. + pub fn get>(key: KeyArg) -> QueryKind::Query { + ::Map::get(key) + } + + /// Try to get the value for the given key from the map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get>(key: KeyArg) -> Result { + ::Map::try_get(key) + } + + /// Swap the values of two keys. + pub fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2) { + ::Map::swap(key1, key2) + } + + /// Store a value to be associated with the given key from the map. + pub fn insert + Clone, ValArg: EncodeLike>( + key: KeyArg, + val: ValArg, + ) { + if !::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_inc()); + } + ::Map::insert(key, val) + } + + /// Remove the value under a key. + pub fn remove + Clone>(key: KeyArg) { + if ::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_dec()); + } + ::Map::remove(key) + } + + /// Mutate the value under a key. 
+ pub fn mutate + Clone, R, F: FnOnce(&mut QueryKind::Query) -> R>( + key: KeyArg, + f: F, + ) -> R { + Self::try_mutate(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + /// Mutate the item, only if an `Ok` value is returned. + pub fn try_mutate(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike + Clone, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + Self::try_mutate_exists(key, |option_value_ref| { + let option_value = core::mem::replace(option_value_ref, None); + let mut query = ::Map::from_optional_value_to_query(option_value); + let res = f(&mut query); + let option_value = ::Map::from_query_to_optional_value(query); + let _ = core::mem::replace(option_value_ref, option_value); + res + }) + } + + /// Mutate the value under a key. Deletes the item if mutated to a `None`. + pub fn mutate_exists + Clone, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R { + Self::try_mutate_exists(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + pub fn try_mutate_exists(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike + Clone, + F: FnOnce(&mut Option) -> Result, + { + ::Map::try_mutate_exists(key, |option_value| { + let existed = option_value.is_some(); + let res = f(option_value); + let exist = option_value.is_some(); + + if res.is_ok() { + if existed && !exist { + // Value was deleted + CounterFor::::mutate(|value| value.saturating_dec()); + } else if !existed && exist { + // Value was added + CounterFor::::mutate(|value| value.saturating_inc()); + } + } + res + }) + } + + /// Take the value under a key. 
+ pub fn take + Clone>(key: KeyArg) -> QueryKind::Query { + let removed_value = + ::Map::mutate_exists(key, |value| core::mem::replace(value, None)); + if removed_value.is_some() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + ::Map::from_optional_value_to_query(removed_value) + } + + /// Append the given items to the value in the storage. + /// + /// `Value` is required to implement `codec::EncodeAppend`. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten and set to + /// `[item]`. Any default value set for the storage item will be ignored on overwrite. + pub fn append(key: EncodeLikeKey, item: EncodeLikeItem) + where + EncodeLikeKey: EncodeLike + Clone, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + if !::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_inc()); + } + ::Map::append(key, item) + } + + /// Read the length of the storage value without decoding the entire value under the given + /// `key`. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. Otherwise + /// `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len>(key: KeyArg) -> Option + where + Value: StorageDecodeLength, + { + ::Map::decode_len(key) + } + + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + pub fn migrate_key>( + key: KeyArg, + ) -> Option { + ::Map::migrate_key::(key) + } + + /// Remove all value of the storage. + pub fn remove_all() { + CounterFor::::set(0u32); + ::Map::remove_all(None); + } + + /// Iter over all value of the storage. 
+ /// + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. + pub fn iter_values() -> crate::storage::PrefixIterator> { + let map_iterator = ::Map::iter_values(); + crate::storage::PrefixIterator { + prefix: map_iterator.prefix, + previous_key: map_iterator.previous_key, + drain: map_iterator.drain, + closure: map_iterator.closure, + phantom: Default::default(), + } + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. + pub fn translate_values Option>(mut f: F) { + ::Map::translate_values(|old_value| { + let res = f(old_value); + if res.is_none() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + res + }) + } + + /// Try and append the given item to the value in the storage. + /// + /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. + pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> + where + KArg: EncodeLike + Clone, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageTryAppend, + { + let bound = Value::bound(); + let current = ::Map::decode_len(Ref::from(&key)).unwrap_or_default(); + if current < bound { + CounterFor::::mutate(|value| value.saturating_inc()); + let key = ::Map::hashed_key_for(key); + sp_io::storage::append(&key, item.encode()); + Ok(()) + } else { + Err(()) + } + } + + /// Initialize the counter with the actual number of items in the map. 
+ /// + /// This function iterates through all the items in the map and sets the counter. This operation + /// can be very heavy, so use with caution. + /// + /// Returns the number of items in the map which is used to set the counter. + pub fn initialize_counter() -> u32 { + let count = Self::iter_values().count() as u32; + CounterFor::::set(count); + count + } + + /// Return the count. + pub fn count() -> u32 { + CounterFor::::get() + } +} + +impl + CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Enumerate all elements in the map in no particular order. + /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter() -> crate::storage::PrefixIterator<(Key, Value), OnRemovalCounterUpdate> { + let map_iterator = ::Map::iter(); + crate::storage::PrefixIterator { + prefix: map_iterator.prefix, + previous_key: map_iterator.previous_key, + drain: map_iterator.drain, + closure: map_iterator.closure, + phantom: Default::default(), + } + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain() -> crate::storage::PrefixIterator<(Key, Value), OnRemovalCounterUpdate> { + let map_iterator = ::Map::drain(); + crate::storage::PrefixIterator { + prefix: map_iterator.prefix, + previous_key: map_iterator.previous_key, + drain: map_iterator.drain, + closure: map_iterator.closure, + phantom: Default::default(), + } + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. 
+ pub fn translate Option>(mut f: F) { + ::Map::translate(|key, old_value| { + let res = f(key, old_value); + if res.is_none() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + res + }) + } +} + +impl StorageEntryMetadataBuilder + for CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec + scale_info::StaticTypeInfo, + Value: FullCodec + scale_info::StaticTypeInfo, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + ::Map::build_metadata(docs, entries); + CounterFor::::build_metadata( + vec![&"Counter for the related counted storage map"], + entries, + ); + } +} + +impl crate::traits::StorageInfoTrait + for CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec + MaxEncodedLen, + Value: FullCodec + MaxEncodedLen, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn storage_info() -> Vec { + [::Map::storage_info(), CounterFor::::storage_info()].concat() + } +} + +/// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. 
+impl + crate::traits::PartialStorageInfoTrait + for CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn partial_storage_info() -> Vec { + [::Map::partial_storage_info(), CounterFor::::storage_info()] + .concat() + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + hash::*, + metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, + storage::{bounded_vec::BoundedVec, types::ValueQuery}, + traits::ConstU32, + }; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct CounterPrefix; + impl StorageInstance for CounterPrefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "counter_for_foo"; + } + impl CountedStorageMapInstance for Prefix { + type CounterPrefix = CounterPrefix; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test_value_query() { + type A = CountedStorageMap; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.twox_64_concat()); + assert_eq!(A::hashed_key_for(3).to_vec(), k); + + assert_eq!(A::contains_key(3), false); + assert_eq!(A::get(3), ADefault::get()); + assert_eq!(A::try_get(3), Err(())); + assert_eq!(A::count(), 0); + + // Insert non-existing. + A::insert(3, 10); + + assert_eq!(A::contains_key(3), true); + assert_eq!(A::get(3), 10); + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(A::count(), 1); + + // Swap non-existing with existing. 
+ A::swap(4, 3); + + assert_eq!(A::contains_key(3), false); + assert_eq!(A::get(3), ADefault::get()); + assert_eq!(A::try_get(3), Err(())); + assert_eq!(A::contains_key(4), true); + assert_eq!(A::get(4), 10); + assert_eq!(A::try_get(4), Ok(10)); + assert_eq!(A::count(), 1); + + // Swap existing with non-existing. + A::swap(4, 3); + + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(A::contains_key(4), false); + assert_eq!(A::get(4), ADefault::get()); + assert_eq!(A::try_get(4), Err(())); + assert_eq!(A::count(), 1); + + A::insert(4, 11); + + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(A::try_get(4), Ok(11)); + assert_eq!(A::count(), 2); + + // Swap 2 existing. + A::swap(3, 4); + + assert_eq!(A::try_get(3), Ok(11)); + assert_eq!(A::try_get(4), Ok(10)); + assert_eq!(A::count(), 2); + + // Insert an existing key, shouldn't increment counted values. + A::insert(3, 11); + + assert_eq!(A::count(), 2); + + // Remove non-existing. + A::remove(2); + + assert_eq!(A::contains_key(2), false); + assert_eq!(A::count(), 2); + + // Remove existing. + A::remove(3); + + assert_eq!(A::try_get(3), Err(())); + assert_eq!(A::count(), 1); + + // Mutate non-existing to existing. + A::mutate(3, |query| { + assert_eq!(*query, ADefault::get()); + *query = 40; + }); + + assert_eq!(A::try_get(3), Ok(40)); + assert_eq!(A::count(), 2); + + // Mutate existing to existing. + A::mutate(3, |query| { + assert_eq!(*query, 40); + *query = 40; + }); + + assert_eq!(A::try_get(3), Ok(40)); + assert_eq!(A::count(), 2); + + // Try fail mutate non-existing to existing. + A::try_mutate(2, |query| { + assert_eq!(*query, ADefault::get()); + *query = 4; + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(A::try_get(2), Err(())); + assert_eq!(A::count(), 2); + + // Try succeed mutate non-existing to existing. 
+ A::try_mutate(2, |query| { + assert_eq!(*query, ADefault::get()); + *query = 41; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(2), Ok(41)); + assert_eq!(A::count(), 3); + + // Try succeed mutate existing to existing. + A::try_mutate(2, |query| { + assert_eq!(*query, 41); + *query = 41; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(2), Ok(41)); + assert_eq!(A::count(), 3); + + // Try fail mutate non-existing to existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(4); + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(A::try_get(1), Err(())); + assert_eq!(A::count(), 3); + + // Try succeed mutate non-existing to existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(1), Ok(43)); + assert_eq!(A::count(), 4); + + // Try succeed mutate existing to existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(1), Ok(43)); + assert_eq!(A::count(), 4); + + // Try succeed mutate existing to non-existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = None; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(1), Err(())); + assert_eq!(A::count(), 3); + + // Take exsisting. + assert_eq!(A::take(4), 10); + + assert_eq!(A::try_get(4), Err(())); + assert_eq!(A::count(), 2); + + // Take non-exsisting. + assert_eq!(A::take(4), ADefault::get()); + + assert_eq!(A::try_get(4), Err(())); + assert_eq!(A::count(), 2); + + // Remove all. + A::remove_all(); + + assert_eq!(A::count(), 0); + assert_eq!(A::initialize_counter(), 0); + + A::insert(1, 1); + A::insert(2, 2); + + // Iter values. + assert_eq!(A::iter_values().collect::>(), vec![2, 1]); + + // Iter drain values. 
+ assert_eq!(A::iter_values().drain().collect::>(), vec![2, 1]); + assert_eq!(A::count(), 0); + + A::insert(1, 1); + A::insert(2, 2); + + // Test initialize_counter. + assert_eq!(A::initialize_counter(), 2); + }) + } + + #[test] + fn test_option_query() { + type B = CountedStorageMap; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.twox_64_concat()); + assert_eq!(B::hashed_key_for(3).to_vec(), k); + + assert_eq!(B::contains_key(3), false); + assert_eq!(B::get(3), None); + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::count(), 0); + + // Insert non-existing. + B::insert(3, 10); + + assert_eq!(B::contains_key(3), true); + assert_eq!(B::get(3), Some(10)); + assert_eq!(B::try_get(3), Ok(10)); + assert_eq!(B::count(), 1); + + // Swap non-existing with existing. + B::swap(4, 3); + + assert_eq!(B::contains_key(3), false); + assert_eq!(B::get(3), None); + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::contains_key(4), true); + assert_eq!(B::get(4), Some(10)); + assert_eq!(B::try_get(4), Ok(10)); + assert_eq!(B::count(), 1); + + // Swap existing with non-existing. + B::swap(4, 3); + + assert_eq!(B::try_get(3), Ok(10)); + assert_eq!(B::contains_key(4), false); + assert_eq!(B::get(4), None); + assert_eq!(B::try_get(4), Err(())); + assert_eq!(B::count(), 1); + + B::insert(4, 11); + + assert_eq!(B::try_get(3), Ok(10)); + assert_eq!(B::try_get(4), Ok(11)); + assert_eq!(B::count(), 2); + + // Swap 2 existing. + B::swap(3, 4); + + assert_eq!(B::try_get(3), Ok(11)); + assert_eq!(B::try_get(4), Ok(10)); + assert_eq!(B::count(), 2); + + // Insert an existing key, shouldn't increment counted values. + B::insert(3, 11); + + assert_eq!(B::count(), 2); + + // Remove non-existing. + B::remove(2); + + assert_eq!(B::contains_key(2), false); + assert_eq!(B::count(), 2); + + // Remove existing. 
+ B::remove(3); + + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::count(), 1); + + // Mutate non-existing to existing. + B::mutate(3, |query| { + assert_eq!(*query, None); + *query = Some(40) + }); + + assert_eq!(B::try_get(3), Ok(40)); + assert_eq!(B::count(), 2); + + // Mutate existing to existing. + B::mutate(3, |query| { + assert_eq!(*query, Some(40)); + *query = Some(40) + }); + + assert_eq!(B::try_get(3), Ok(40)); + assert_eq!(B::count(), 2); + + // Mutate existing to non-existing. + B::mutate(3, |query| { + assert_eq!(*query, Some(40)); + *query = None + }); + + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::count(), 1); + + B::insert(3, 40); + + // Try fail mutate non-existing to existing. + B::try_mutate(2, |query| { + assert_eq!(*query, None); + *query = Some(4); + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(B::try_get(2), Err(())); + assert_eq!(B::count(), 2); + + // Try succeed mutate non-existing to existing. + B::try_mutate(2, |query| { + assert_eq!(*query, None); + *query = Some(41); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(2), Ok(41)); + assert_eq!(B::count(), 3); + + // Try succeed mutate existing to existing. + B::try_mutate(2, |query| { + assert_eq!(*query, Some(41)); + *query = Some(41); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(2), Ok(41)); + assert_eq!(B::count(), 3); + + // Try succeed mutate existing to non-existing. + B::try_mutate(2, |query| { + assert_eq!(*query, Some(41)); + *query = None; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(2), Err(())); + assert_eq!(B::count(), 2); + + B::insert(2, 41); + + // Try fail mutate non-existing to existing. + B::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(4); + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(B::try_get(1), Err(())); + assert_eq!(B::count(), 3); + + // Try succeed mutate non-existing to existing. 
+ B::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(1), Ok(43)); + assert_eq!(B::count(), 4); + + // Try succeed mutate existing to existing. + B::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(1), Ok(43)); + assert_eq!(B::count(), 4); + + // Try succeed mutate existing to non-existing. + B::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = None; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(1), Err(())); + assert_eq!(B::count(), 3); + + // Take exsisting. + assert_eq!(B::take(4), Some(10)); + + assert_eq!(B::try_get(4), Err(())); + assert_eq!(B::count(), 2); + + // Take non-exsisting. + assert_eq!(B::take(4), None); + + assert_eq!(B::try_get(4), Err(())); + assert_eq!(B::count(), 2); + + // Remove all. + B::remove_all(); + + assert_eq!(B::count(), 0); + assert_eq!(B::initialize_counter(), 0); + + B::insert(1, 1); + B::insert(2, 2); + + // Iter values. + assert_eq!(B::iter_values().collect::>(), vec![2, 1]); + + // Iter drain values. + assert_eq!(B::iter_values().drain().collect::>(), vec![2, 1]); + assert_eq!(B::count(), 0); + + B::insert(1, 1); + B::insert(2, 2); + + // Test initialize_counter. 
+ assert_eq!(B::initialize_counter(), 2); + }) + } + + #[test] + fn append_decode_len_works() { + type B = CountedStorageMap>; + + TestExternalities::default().execute_with(|| { + assert_eq!(B::decode_len(0), None); + B::append(0, 3); + assert_eq!(B::decode_len(0), Some(1)); + B::append(0, 3); + assert_eq!(B::decode_len(0), Some(2)); + B::append(0, 3); + assert_eq!(B::decode_len(0), Some(3)); + }) + } + + #[test] + fn try_append_decode_len_works() { + type B = CountedStorageMap>>; + + TestExternalities::default().execute_with(|| { + assert_eq!(B::decode_len(0), None); + B::try_append(0, 3).unwrap(); + assert_eq!(B::decode_len(0), Some(1)); + B::try_append(0, 3).unwrap(); + assert_eq!(B::decode_len(0), Some(2)); + B::try_append(0, 3).unwrap(); + assert_eq!(B::decode_len(0), Some(3)); + B::try_append(0, 3).err().unwrap(); + assert_eq!(B::decode_len(0), Some(3)); + }) + } + + #[test] + fn migrate_keys_works() { + type A = CountedStorageMap; + type B = CountedStorageMap; + TestExternalities::default().execute_with(|| { + A::insert(1, 1); + assert_eq!(B::migrate_key::(1), Some(1)); + assert_eq!(B::get(1), Some(1)); + }) + } + + #[test] + fn translate_values() { + type A = CountedStorageMap; + TestExternalities::default().execute_with(|| { + A::insert(1, 1); + A::insert(2, 2); + A::translate_values::(|old_value| if old_value == 1 { None } else { Some(1) }); + assert_eq!(A::count(), 1); + assert_eq!(A::get(2), Some(1)); + }) + } + + #[test] + fn test_iter_drain_translate() { + type A = CountedStorageMap; + TestExternalities::default().execute_with(|| { + A::insert(1, 1); + A::insert(2, 2); + + assert_eq!(A::iter().collect::>(), vec![(2, 2), (1, 1)]); + + assert_eq!(A::count(), 2); + + A::translate::( + |key, value| if key == 1 { None } else { Some(key as u32 * value) }, + ); + + assert_eq!(A::count(), 1); + + assert_eq!(A::drain().collect::>(), vec![(2, 4)]); + + assert_eq!(A::count(), 0); + }) + } + + #[test] + fn test_metadata() { + type A = CountedStorageMap; + let mut 
entries = vec![]; + A::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Twox64Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: 97u32.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "counter_for_foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec!["Counter for the related counted storage map"], + }, + ] + ); + } +} diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 7750110050868..b9af4a621b92a 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -19,9 +19,9 @@ //! StoragePrefixedDoubleMap traits and their methods directly. use crate::{ - metadata::{StorageEntryModifier, StorageEntryType}, + metadata::{StorageEntryMetadata, StorageEntryType}, storage::{ - types::{OptionQuery, QueryKindTrait, StorageEntryMetadata}, + types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, @@ -342,7 +342,7 @@ where /// Iter over all value of the storage. /// - /// NOTE: If a value failed to decode becaues storage is corrupted then it is skipped. + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. 
pub fn iter_values() -> crate::storage::PrefixIterator { >::iter_values() } @@ -512,7 +512,7 @@ where } impl - StorageEntryMetadata + StorageEntryMetadataBuilder for StorageDoubleMap where Prefix: StorageInstance, @@ -525,19 +525,20 @@ where OnEmpty: Get + 'static, MaxValues: Get>, { - const MODIFIER: StorageEntryModifier = QueryKind::METADATA; - const NAME: &'static str = Prefix::STORAGE_PREFIX; - - fn ty() -> StorageEntryType { - StorageEntryType::Map { - hashers: vec![Hasher1::METADATA, Hasher2::METADATA], - key: scale_info::meta_type::<(Key1, Key2)>(), - value: scale_info::meta_type::(), - } - } - - fn default() -> Vec { - OnEmpty::get().encode() + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Map { + hashers: vec![Hasher1::METADATA, Hasher2::METADATA], + key: scale_info::meta_type::<(Key1, Key2)>(), + value: scale_info::meta_type::(), + }, + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); } } @@ -605,7 +606,6 @@ mod test { metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, storage::types::ValueQuery, }; - use assert_matches::assert_matches; use sp_io::{hashing::twox_128, TestExternalities}; struct Prefix; @@ -767,30 +767,43 @@ mod test { A::translate::(|k1, k2, v| Some((k1 * k2 as u16 * v as u16).into())); assert_eq!(A::iter().collect::>(), vec![(4, 40, 1600), (3, 30, 900)]); - assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); - - let assert_map_hashers = |ty, expected_hashers| { - if let StorageEntryType::Map { hashers, .. } = ty { - assert_eq!(hashers, expected_hashers) - } else { - assert_matches!(ty, StorageEntryType::Map { .. 
}) - } - }; - - assert_map_hashers( - A::ty(), - vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], - ); - assert_map_hashers( - AValueQueryWithAnOnEmpty::ty(), - vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: 97u32.encode(), + docs: vec![], + } + ] ); - assert_eq!(A::NAME, "foo"); - assert_eq!(AValueQueryWithAnOnEmpty::default(), 97u32.encode()); - assert_eq!(A::default(), Option::::None.encode()); - WithLen::remove_all(None); assert_eq!(WithLen::decode_len(3, 30), None); WithLen::append(0, 100, 10); diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index a31224f15c80f..45340f9015eaa 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -19,9 +19,9 @@ //! methods directly. 
use crate::{ - metadata::{StorageEntryModifier, StorageEntryType}, + metadata::{StorageEntryMetadata, StorageEntryType}, storage::{ - types::{OptionQuery, QueryKindTrait, StorageEntryMetadata}, + types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, @@ -241,7 +241,7 @@ where /// Iter over all value of the storage. /// - /// NOTE: If a value failed to decode becaues storage is corrupted then it is skipped. + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. pub fn iter_values() -> crate::storage::PrefixIterator { >::iter_values() } @@ -336,7 +336,7 @@ where } } -impl StorageEntryMetadata +impl StorageEntryMetadataBuilder for StorageMap where Prefix: StorageInstance, @@ -347,19 +347,20 @@ where OnEmpty: Get + 'static, MaxValues: Get>, { - const MODIFIER: StorageEntryModifier = QueryKind::METADATA; - const NAME: &'static str = Prefix::STORAGE_PREFIX; - - fn ty() -> StorageEntryType { - StorageEntryType::Map { - hashers: vec![Hasher::METADATA], - key: scale_info::meta_type::(), - value: scale_info::meta_type::(), - } - } - - fn default() -> Vec { - OnEmpty::get().encode() + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Map { + hashers: vec![Hasher::METADATA], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); } } @@ -421,7 +422,6 @@ mod test { metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, storage::types::ValueQuery, }; - use assert_matches::assert_matches; use sp_io::{hashing::twox_128, TestExternalities}; struct Prefix; @@ -573,25 +573,36 @@ mod test { A::translate::(|k, v| Some((k * v as u16).into())); 
assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); - assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); - - let assert_map_hashers = |ty, expected_hashers| { - if let StorageEntryType::Map { hashers, .. } = ty { - assert_eq!(hashers, expected_hashers) - } else { - assert_matches!(ty, StorageEntryType::Map { .. }) - } - }; - - assert_map_hashers(A::ty(), vec![StorageHasher::Blake2_128Concat]); - assert_map_hashers( - AValueQueryWithAnOnEmpty::ty(), - vec![StorageHasher::Blake2_128Concat], + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: 97u32.encode(), + docs: vec![], + } + ] ); - assert_eq!(A::NAME, "foo"); - assert_eq!(AValueQueryWithAnOnEmpty::default(), 97u32.encode()); - assert_eq!(A::default(), Option::::None.encode()); WithLen::remove_all(None); assert_eq!(WithLen::decode_len(3), None); diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs index 76fed0b8cb320..bcab996f68323 100644 --- a/frame/support/src/storage/types/mod.rs +++ b/frame/support/src/storage/types/mod.rs @@ -18,16 +18,18 @@ //! Storage types to build abstraction on storage, they implements storage traits such as //! StorageMap and others. 
-use crate::metadata::{StorageEntryModifier, StorageEntryType}; +use crate::metadata::{StorageEntryMetadata, StorageEntryModifier}; use codec::FullCodec; use sp_std::prelude::*; +mod counted_map; mod double_map; mod key; mod map; mod nmap; mod value; +pub use counted_map::{CountedStorageMap, CountedStorageMapInstance}; pub use double_map::StorageDoubleMap; pub use key::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, Key, KeyGenerator, @@ -103,13 +105,10 @@ where } } -/// Provide metadata for a storage entry. +/// Build the metadata of a storage. /// -/// Implemented by each of the storage entry kinds: value, map, doublemap and nmap. -pub trait StorageEntryMetadata { - const MODIFIER: StorageEntryModifier; - const NAME: &'static str; - - fn ty() -> StorageEntryType; - fn default() -> Vec; +/// Implemented by each of the storage types: value, map, countedmap, doublemap and nmap. +pub trait StorageEntryMetadataBuilder { + /// Build into `entries` the storage metadata entries of a storage given some `docs`. + fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); } diff --git a/frame/support/src/storage/types/nmap.rs b/frame/support/src/storage/types/nmap.rs index 7048a69d59c2c..96d6f383ae117 100755 --- a/frame/support/src/storage/types/nmap.rs +++ b/frame/support/src/storage/types/nmap.rs @@ -19,11 +19,11 @@ //! StoragePrefixedDoubleMap traits and their methods directly. 
use crate::{ - metadata::{StorageEntryModifier, StorageEntryType}, + metadata::{StorageEntryMetadata, StorageEntryType}, storage::{ types::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, OptionQuery, QueryKindTrait, - StorageEntryMetadata, TupleToEncodedIter, + StorageEntryMetadataBuilder, TupleToEncodedIter, }, KeyGenerator, PrefixIterator, StorageAppend, StorageDecodeLength, StoragePrefixedMap, }, @@ -440,7 +440,7 @@ where } } -impl StorageEntryMetadata +impl StorageEntryMetadataBuilder for StorageNMap where Prefix: StorageInstance, @@ -450,19 +450,20 @@ where OnEmpty: Get + 'static, MaxValues: Get>, { - const MODIFIER: StorageEntryModifier = QueryKind::METADATA; - const NAME: &'static str = Prefix::STORAGE_PREFIX; - - fn ty() -> StorageEntryType { - StorageEntryType::Map { - key: scale_info::meta_type::(), - hashers: Key::HASHER_METADATA.iter().cloned().collect(), - value: scale_info::meta_type::(), - } - } - - fn default() -> Vec { - OnEmpty::get().encode() + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + hashers: Key::HASHER_METADATA.iter().cloned().collect(), + value: scale_info::meta_type::(), + }, + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); } } @@ -516,8 +517,8 @@ where mod test { use super::*; use crate::{ - hash::*, - metadata::StorageEntryModifier, + hash::{StorageHasher as _, *}, + metadata::{StorageEntryModifier, StorageHasher}, storage::types::{Key, ValueQuery}, }; use sp_io::{hashing::twox_128, TestExternalities}; @@ -684,11 +685,36 @@ mod test { A::translate::(|k1, v| Some((k1 as u16 * v as u16).into())); assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); - assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); - assert_eq!(A::NAME, 
"Foo"); - assert_eq!(AValueQueryWithAnOnEmpty::default(), 98u32.encode()); - assert_eq!(A::default(), Option::::None.encode()); + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + } + ] + ); WithLen::remove_all(None); assert_eq!(WithLen::decode_len((3,)), None); @@ -852,11 +878,42 @@ mod test { A::translate::(|(k1, k2), v| Some((k1 * k2 as u16 * v as u16).into())); assert_eq!(A::iter().collect::>(), vec![((4, 40), 1600), ((3, 30), 900)]); - assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); - assert_eq!(A::NAME, "Foo"); - assert_eq!(AValueQueryWithAnOnEmpty::default(), 98u32.encode()); - assert_eq!(A::default(), Option::::None.encode()); + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "Foo", + modifier: 
StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + } + ] + ); WithLen::remove_all(None); assert_eq!(WithLen::decode_len((3, 30)), None); @@ -1042,11 +1099,44 @@ mod test { }); assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 4), ((3, 30, 300), 3)]); - assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); - assert_eq!(A::NAME, "Foo"); - assert_eq!(AValueQueryWithAnOnEmpty::default(), 98u32.encode()); - assert_eq!(A::default(), Option::::None.encode()); + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u16, u16)>(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u16, u16)>(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + } + ] + ); WithLen::remove_all(None); assert_eq!(WithLen::decode_len((3, 30, 300)), None); diff --git a/frame/support/src/storage/types/value.rs b/frame/support/src/storage/types/value.rs index d7f15487592b1..c5e7173bd0af7 100644 --- a/frame/support/src/storage/types/value.rs +++ 
b/frame/support/src/storage/types/value.rs @@ -18,10 +18,10 @@ //! Storage value type. Implements StorageValue trait and its method directly. use crate::{ - metadata::{StorageEntryModifier, StorageEntryType}, + metadata::{StorageEntryMetadata, StorageEntryType}, storage::{ generator::StorageValue as StorageValueT, - types::{OptionQuery, QueryKindTrait, StorageEntryMetadata}, + types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, StorageAppend, StorageDecodeLength, StorageTryAppend, }, traits::{GetDefault, StorageInfo, StorageInstance}, @@ -201,7 +201,7 @@ where } } -impl StorageEntryMetadata +impl StorageEntryMetadataBuilder for StorageValue where Prefix: StorageInstance, @@ -209,15 +209,16 @@ where QueryKind: QueryKindTrait, OnEmpty: crate::traits::Get + 'static, { - const MODIFIER: StorageEntryModifier = QueryKind::METADATA; - const NAME: &'static str = Prefix::STORAGE_PREFIX; - - fn ty() -> StorageEntryType { - StorageEntryType::Plain(scale_info::meta_type::()) - } - - fn default() -> Vec { - OnEmpty::get().encode() + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); } } @@ -342,11 +343,28 @@ mod test { A::kill(); assert_eq!(A::try_get(), Err(())); - assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); - assert_eq!(A::NAME, "foo"); - assert_eq!(A::default(), Option::::None.encode()); - assert_eq!(AValueQueryWithAnOnEmpty::default(), 97u32.encode()); + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Optional, + ty: 
StorageEntryType::Plain(scale_info::meta_type::()), + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: 97u32.encode(), + docs: vec![], + } + ] + ); WithLen::kill(); assert_eq!(WithLen::decode_len(), None); diff --git a/frame/support/src/storage/weak_bounded_vec.rs b/frame/support/src/storage/weak_bounded_vec.rs index 9c30c45c3e2e1..4655c809e014b 100644 --- a/frame/support/src/storage/weak_bounded_vec.rs +++ b/frame/support/src/storage/weak_bounded_vec.rs @@ -27,7 +27,7 @@ use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; +use sp_std::{convert::TryFrom, marker::PhantomData, prelude::*}; /// A weakly bounded vector. /// @@ -171,12 +171,12 @@ impl Default for WeakBoundedVec { } #[cfg(feature = "std")] -impl fmt::Debug for WeakBoundedVec +impl std::fmt::Debug for WeakBoundedVec where - T: fmt::Debug, + T: std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("WeakBoundedVec").field(&self.0).field(&Self::bound()).finish() } } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index efb5559ed0622..5ac0208dc2033 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -52,7 +52,7 @@ mod misc; pub use misc::{ Backing, ConstU32, EnsureInherentsAreFirst, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker, - OnKilledAccount, OnNewAccount, SameOrOther, Time, TryDrop, UnixTime, + OnKilledAccount, OnNewAccount, SameOrOther, Time, TryDrop, UnixTime, WrapperOpaque, }; mod stored_map; @@ -62,8 +62,8 @@ pub use randomness::Randomness; mod metadata; pub use metadata::{ - CallMetadata, GetCallMetadata, 
GetCallName, GetStorageVersion, PalletInfo, PalletInfoAccess, - StorageVersion, STORAGE_VERSION_STORAGE_KEY_POSTFIX, + CallMetadata, CrateVersion, GetCallMetadata, GetCallName, GetStorageVersion, PalletInfo, + PalletInfoAccess, StorageVersion, STORAGE_VERSION_STORAGE_KEY_POSTFIX, }; mod hooks; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index adba88e5acbf3..2a8b0a156247a 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -19,7 +19,7 @@ use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::Saturating; -use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize}; +use sp_runtime::traits::AtLeast32BitUnsigned; /// The block initialization trait. /// @@ -294,7 +294,7 @@ pub trait Hooks { /// A trait to define the build function of a genesis config, T and I are placeholder for pallet /// trait and pallet instance. #[cfg(feature = "std")] -pub trait GenesisBuild: Default + MaybeSerializeDeserialize { +pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { /// The build function is called within an externalities allowing storage APIs. /// Thus one can write to storage using regular pallet storages. fn build(&self); diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index e877f29e0a137..14b7e6d7355e2 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -20,7 +20,7 @@ use codec::{Decode, Encode}; use sp_runtime::RuntimeDebug; -/// Provides information about the pallet setup in the runtime. +/// Provides information about the pallet itself and its setup in the runtime. /// /// An implementor should be able to provide information about each pallet that /// is configured in `construct_runtime!`. @@ -29,16 +29,25 @@ pub trait PalletInfo { fn index() -> Option; /// Convert the given pallet `P` into its name as configured in the runtime. 
fn name() -> Option<&'static str>; + /// Convert the given pallet `P` into its Rust module name as used in `construct_runtime!`. + fn module_name() -> Option<&'static str>; + /// Convert the given pallet `P` into its containing crate version. + fn crate_version() -> Option; } -/// Provides information about the pallet setup in the runtime. +/// Provides information about the pallet itself and its setup in the runtime. /// -/// Access the information provided by [`PalletInfo`] for a specific pallet. +/// Declare some information and access the information provided by [`PalletInfo`] for a specific +/// pallet. pub trait PalletInfoAccess { /// Index of the pallet as configured in the runtime. fn index() -> usize; /// Name of the pallet as configured in the runtime. fn name() -> &'static str; + /// Name of the Rust module containing the pallet. + fn module_name() -> &'static str; + /// Version of the crate containing the pallet. + fn crate_version() -> CrateVersion; } /// The function and pallet name of the Call. @@ -68,6 +77,34 @@ pub trait GetCallMetadata { fn get_call_metadata(&self) -> CallMetadata; } +/// The version of a crate. +#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy, Default)] +pub struct CrateVersion { + /// The major version of the crate. + pub major: u16, + /// The minor version of the crate. + pub minor: u8, + /// The patch version of the crate. + pub patch: u8, +} + +impl CrateVersion { + pub const fn new(major: u16, minor: u8, patch: u8) -> Self { + Self { major, minor, patch } + } +} + +impl sp_std::cmp::PartialOrd for CrateVersion { + fn partial_cmp(&self, other: &Self) -> Option { + let res = self + .major + .cmp(&other.major) + .then_with(|| self.minor.cmp(&other.minor).then_with(|| self.patch.cmp(&other.patch))); + + Some(res) + } +} + /// The storage key postfix that is used to store the [`StorageVersion`] per pallet. 
/// /// The full storage key is built by using: diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 1776e1ba320ea..db6e0321005a5 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -18,7 +18,10 @@ //! Smaller traits used in FRAME which don't need their own file. use crate::dispatch::Parameter; +use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen}; +use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_runtime::{traits::Block as BlockT, DispatchError}; +use sp_std::prelude::*; /// Anything that can have a `::len()` method. pub trait Len { @@ -377,3 +380,112 @@ impl, const T: u32> EstimateCallFee for T.into() } } + +/// A wrapper for any type `T` which implement encode/decode in a way compatible with `Vec`. +/// +/// The encoding is the encoding of `T` prepended with the compact encoding of its size in bytes. +/// Thus the encoded value can be decoded as a `Vec`. +#[derive(Debug, Eq, PartialEq, Default, Clone)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct WrapperOpaque(pub T); + +impl EncodeLike for WrapperOpaque {} + +impl Encode for WrapperOpaque { + fn size_hint(&self) -> usize { + self.0.size_hint().saturating_add(>::max_encoded_len()) + } + + fn encode_to(&self, dest: &mut O) { + self.0.encode().encode_to(dest); + } + + fn encode(&self) -> Vec { + self.0.encode().encode() + } + + fn using_encoded R>(&self, f: F) -> R { + self.0.encode().using_encoded(f) + } +} + +impl Decode for WrapperOpaque { + fn decode(input: &mut I) -> Result { + Ok(Self(T::decode(&mut &>::decode(input)?[..])?)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + >::skip(input) + } +} + +impl From for WrapperOpaque { + fn from(t: T) -> Self { + Self(t) + } +} + +impl MaxEncodedLen for WrapperOpaque { + fn max_encoded_len() -> usize { + let t_max_len = T::max_encoded_len(); + + // See scale encoding 
https://substrate.dev/docs/en/knowledgebase/advanced/codec + if t_max_len < 64 { + t_max_len + 1 + } else if t_max_len < 2usize.pow(14) { + t_max_len + 2 + } else if t_max_len < 2usize.pow(30) { + t_max_len + 4 + } else { + >::max_encoded_len().saturating_add(T::max_encoded_len()) + } + } +} + +impl TypeInfo for WrapperOpaque { + type Identity = Self; + fn type_info() -> Type { + Type::builder() + .path(Path::new("WrapperOpaque", module_path!())) + .type_params(vec![TypeParameter::new("T", Some(meta_type::()))]) + .composite( + Fields::unnamed() + .field(|f| f.compact::()) + .field(|f| f.ty::().type_name("T")), + ) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_opaque_wrapper() { + let encoded = WrapperOpaque(3u32).encode(); + assert_eq!(encoded, [codec::Compact(4u32).encode(), 3u32.to_le_bytes().to_vec()].concat()); + let vec_u8 = >::decode(&mut &encoded[..]).unwrap(); + let decoded_from_vec_u8 = u32::decode(&mut &vec_u8[..]).unwrap(); + assert_eq!(decoded_from_vec_u8, 3u32); + let decoded = >::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded.0, 3u32); + + assert_eq!(>::max_encoded_len(), 63 + 1); + assert_eq!( + >::max_encoded_len(), + WrapperOpaque([0u8; 63]).encode().len() + ); + + assert_eq!(>::max_encoded_len(), 64 + 2); + assert_eq!( + >::max_encoded_len(), + WrapperOpaque([0u8; 64]).encode().len() + ); + + assert_eq!( + >::max_encoded_len(), + 2usize.pow(14) - 1 + 2 + ); + assert_eq!(>::max_encoded_len(), 2usize.pow(14) + 4); + } +} diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs index aca62bcad65c7..91a9382d07fcc 100644 --- a/frame/support/src/traits/tokens.rs +++ b/frame/support/src/traits/tokens.rs @@ -26,6 +26,6 @@ pub mod nonfungible; pub mod nonfungibles; pub use imbalance::Imbalance; pub use misc::{ - BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, + AssetId, Balance, BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, 
WithdrawConsequence, WithdrawReasons, }; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs index 3f5a1c75860c2..2930853201d25 100644 --- a/frame/support/src/traits/tokens/fungibles.rs +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -23,6 +23,7 @@ use super::{ }; use crate::dispatch::{DispatchError, DispatchResult}; use sp_runtime::traits::Saturating; +use sp_std::vec::Vec; mod balanced; pub use balanced::{Balanced, Unbalanced}; @@ -65,6 +66,18 @@ pub trait Inspect { ) -> WithdrawConsequence; } +/// Trait for reading metadata from a fungible asset. +pub trait InspectMetadata: Inspect { + /// Return the name of an asset. + fn name(asset: &Self::AssetId) -> Vec; + + /// Return the symbol of an asset. + fn symbol(asset: &Self::AssetId) -> Vec; + + /// Return the decimals of an asset. + fn decimals(asset: &Self::AssetId) -> u8; +} + /// Trait for providing a set of named fungible assets which can be created and destroyed. pub trait Mutate: Inspect { /// Attempt to increase the `asset` balance of `who` by `amount`. @@ -227,3 +240,39 @@ impl + MutateHold> BalancedHold>::slash(asset, who, actual) } } + +/// Trait for providing the ability to create new fungible assets. +pub trait Create: Inspect { + /// Create a new fungible asset. + fn create( + id: Self::AssetId, + admin: AccountId, + is_sufficient: bool, + min_balance: Self::Balance, + ) -> DispatchResult; +} + +/// Trait for providing the ability to destroy existing fungible assets. +pub trait Destroy: Inspect { + /// The witness data needed to destroy an asset. + type DestroyWitness; + + /// Provide the appropriate witness data needed to destroy an asset. + fn get_destroy_witness(id: &Self::AssetId) -> Option; + + /// Destroy an existing fungible asset. + /// * `id`: The `AssetId` to be destroyed. + /// * `witness`: Any witness data that needs to be provided to complete the operation + /// successfully. 
+ /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy + /// command. If not provided, we will not do any authorization checks before destroying the + /// asset. + /// + /// If successful, this function will return the actual witness data from the destroyed asset. + /// This may be different than the witness data provided, and can be used to refund weight. + fn destroy( + id: Self::AssetId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result; +} diff --git a/frame/support/src/traits/tokens/nonfungibles.rs b/frame/support/src/traits/tokens/nonfungibles.rs index 452ee2212d62a..b5a14761064f3 100644 --- a/frame/support/src/traits/tokens/nonfungibles.rs +++ b/frame/support/src/traits/tokens/nonfungibles.rs @@ -27,7 +27,7 @@ //! Implementations of these traits may be converted to implementations of corresponding //! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. -use crate::dispatch::DispatchResult; +use crate::dispatch::{DispatchError, DispatchResult}; use codec::{Decode, Encode}; use sp_runtime::TokenError; use sp_std::prelude::*; @@ -123,6 +123,31 @@ pub trait Create: Inspect { fn create_class(class: &Self::ClassId, who: &AccountId, admin: &AccountId) -> DispatchResult; } +/// Trait for providing the ability to destroy classes of nonfungible assets. +pub trait Destroy: Inspect { + /// The witness data needed to destroy an asset. + type DestroyWitness; + + /// Provide the appropriate witness data needed to destroy an asset. + fn get_destroy_witness(class: &Self::ClassId) -> Option; + + /// Destroy an existing fungible asset. + /// * `class`: The `ClassId` to be destroyed. + /// * `witness`: Any witness data that needs to be provided to complete the operation + /// successfully. + /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy + /// command. If not provided, we will not do any authorization checks before destroying the + /// asset. 
+ /// + /// If successful, this function will return the actual witness data from the destroyed asset. + /// This may be different than the witness data provided, and can be used to refund weight. + fn destroy( + class: Self::ClassId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result; +} + /// Trait for providing an interface for multiple classes of NFT-like assets which may be minted, /// burned and/or have attributes set on them. pub trait Mutate: Inspect { diff --git a/frame/support/src/traits/validation.rs b/frame/support/src/traits/validation.rs index f4107ef6e2b02..11ea5a79f67ba 100644 --- a/frame/support/src/traits/validation.rs +++ b/frame/support/src/traits/validation.rs @@ -18,7 +18,7 @@ //! Traits for dealing with validation and validators. use crate::{dispatch::Parameter, weights::Weight}; -use codec::{Codec, Decode}; +use codec::{Codec, Decode, MaxEncodedLen}; use sp_runtime::{ traits::{Convert, Zero}, BoundToRuntimeAppPublic, ConsensusEngineId, Permill, RuntimeAppPublic, @@ -31,7 +31,7 @@ use sp_std::prelude::*; /// Something that can give information about the current validator set. pub trait ValidatorSet { /// Type for representing validator id in a session. - type ValidatorId: Parameter; + type ValidatorId: Parameter + MaxEncodedLen; /// A type for converting `AccountId` to `ValidatorId`. type ValidatorIdOf: Convert>; diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index e12880871e5c2..863afceac4a98 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -48,3 +48,5 @@ try-runtime = ["frame-support/try-runtime"] # WARNING: CI only execute pallet test with this feature, # if the feature intended to be used outside, CI and this message need to be updated. 
conditional-storage = [] +# Disable ui tests +disable-ui-tests = [] diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index 52c0a6270d47f..073f8c9c19352 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -49,6 +49,12 @@ impl frame_support::traits::PalletInfo for PanicPalletInfo { fn name() -> Option<&'static str> { unimplemented!("PanicPalletInfo mustn't be triggered by tests"); } + fn module_name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn crate_version() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } } /// Provides an implementation of [`frame_support::traits::Randomness`] that should only be used in diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 062993fe10fbb..dd5538370449d 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -21,7 +21,7 @@ #![recursion_limit = "128"] -use frame_support::traits::PalletInfo as _; +use frame_support::traits::{CrateVersion, PalletInfo as _}; use scale_info::TypeInfo; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -739,40 +739,66 @@ fn test_metadata() { fn pallet_in_runtime_is_correct() { assert_eq!(PalletInfo::index::().unwrap(), 30); assert_eq!(PalletInfo::name::().unwrap(), "System"); + assert_eq!(PalletInfo::module_name::().unwrap(), "system"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 31); assert_eq!(PalletInfo::name::().unwrap(), "Module1_1"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 32); assert_eq!(PalletInfo::name::().unwrap(), "Module2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module2"); + 
assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 33); assert_eq!(PalletInfo::name::().unwrap(), "Module1_2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 34); assert_eq!(PalletInfo::name::().unwrap(), "NestedModule3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "nested::module3"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 35); assert_eq!(PalletInfo::name::().unwrap(), "Module3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "self::module3"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 6); assert_eq!(PalletInfo::name::().unwrap(), "Module1_3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 3); assert_eq!(PalletInfo::name::().unwrap(), "Module1_4"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 4); assert_eq!(PalletInfo::name::().unwrap(), "Module1_5"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 1); assert_eq!(PalletInfo::name::().unwrap(), "Module1_6"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 2); assert_eq!(PalletInfo::name::().unwrap(), "Module1_7"); + assert_eq!(PalletInfo::module_name::().unwrap(), 
"module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 12); assert_eq!(PalletInfo::name::().unwrap(), "Module1_8"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 13); assert_eq!(PalletInfo::name::().unwrap(), "Module1_9"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); } diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index a55e800628582..ee475e37605ef 100644 --- a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -18,6 +18,7 @@ use std::env; #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 5bc831f58988b..3dc7fcda9f18a 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -10,7 +10,7 @@ error: `Pallet` does not have the std feature enabled, this will cause the `test 22 | | } | |_^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/no_std_genesis_config.rs:19:11 @@ -30,7 +30,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 22 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -48,7 +48,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 22 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use frame_support_test::Pallet; @@ -70,7 +70,7 @@ error[E0412]: cannot find type `GenesisConfig` in crate `test_pallet` 22 | | } | |_^ not found in `test_pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace 
for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this struct | 1 | use frame_system::GenesisConfig; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index 8781fe0df201a..2629cf4101923 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_call_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git 
a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index fa837698aa642..af69b79ed1a64 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::event] defined, perhaps you should remov 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_event_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0412]: cannot find type `Event` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::Event; @@ -51,7 +51,7 @@ error[E0412]: cannot find type `Event` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::Event; @@ -71,7 +71,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro 
`construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -89,7 +89,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 699f66a414ed2..bfedb921bca44 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you sho 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_genesis_config_part.rs:28:17 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 
@@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; @@ -75,7 +75,7 @@ error[E0412]: cannot find type `GenesisConfig` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this struct | 1 | use frame_system::GenesisConfig; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 88ff9ee910937..50dde1108263b 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should re 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_inherent_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace 
for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index 3b3aa75c1ea08..b5f3ec4d381bc 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remo 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_origin_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ 
-51,7 +51,7 @@ error[E0412]: cannot find type `Origin` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this type alias | 1 | use frame_system::Origin; @@ -69,7 +69,7 @@ error[E0412]: cannot find type `Origin` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::Origin; @@ -89,7 +89,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index ac12c56d5c279..12bdce67cf038 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro 
`pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_validate_unsigned_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/decl_module_ui.rs b/frame/support/test/tests/decl_module_ui.rs index 2c097bb6e1332..e84025b9f2564 100644 --- a/frame/support/test/tests/decl_module_ui.rs +++ b/frame/support/test/tests/decl_module_ui.rs @@ -16,6 +16,7 @@ // limitations under the License. #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] #[test] fn decl_module_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 3bf5f58b43a39..86c427d8080be 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -10,7 +10,7 @@ error: `integrity_test` can only be passed once as input. 7 | | } | |_^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index ddde7c72c1cc5..18aaec12c5f39 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -9,3 +9,5 @@ frame_support::decl_module! { } } } + +fn main() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 2911d7ded8a23..369be77b8d249 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -10,16 +10,4 @@ error: `on_initialize` can only be passed once as input. 
11 | | } | |_^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0601]: `main` function not found in crate `$CRATE` - --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 - | -1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { -3 | | fn on_initialize() -> Weight { -4 | | 0 -... | -10 | | } -11 | | } - | |_^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs` + = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/decl_storage_ui.rs b/frame/support/test/tests/decl_storage_ui.rs index 99d2da87aca28..400ddfc0f94f4 100644 --- a/frame/support/test/tests/decl_storage_ui.rs +++ b/frame/support/test/tests/decl_storage_ui.rs @@ -16,6 +16,7 @@ // limitations under the License. #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] #[test] fn decl_storage_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/frame/support/test/tests/derive_no_bound_ui.rs b/frame/support/test/tests/derive_no_bound_ui.rs index 434671e19b105..22c116931a47e 100644 --- a/frame/support/test/tests/derive_no_bound_ui.rs +++ b/frame/support/test/tests/derive_no_bound_ui.rs @@ -16,6 +16,7 @@ // limitations under the License. #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] #[test] fn derive_no_bound_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.stderr b/frame/support/test/tests/derive_no_bound_ui/clone.stderr index 4b253ad12451b..050b576c8b9ed 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/clone.stderr @@ -1,7 +1,11 @@ error[E0277]: the trait bound `::C: Clone` is not satisfied - --> $DIR/clone.rs:7:2 - | -7 | c: T::C, - | ^ the trait `Clone` is not implemented for `::C` - | - = note: required by `clone` + --> $DIR/clone.rs:7:2 + | +7 | c: T::C, + | ^ the trait `Clone` is not implemented for `::C` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/derive_no_bound_ui/default.stderr b/frame/support/test/tests/derive_no_bound_ui/default.stderr index d58b5e9185268..7608f877a3b56 100644 --- a/frame/support/test/tests/derive_no_bound_ui/default.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/default.stderr @@ -1,7 +1,11 @@ error[E0277]: the trait bound `::C: std::default::Default` is not satisfied - --> $DIR/default.rs:7:2 - | -7 | c: T::C, - | ^ the trait `std::default::Default` is not implemented for `::C` - | - = note: required by `std::default::Default::default` + --> $DIR/default.rs:7:2 + | +7 | c: T::C, + | ^ the trait `std::default::Default` is not implemented for `::C` + | +note: required by `std::default::Default::default` + --> $DIR/default.rs:116:5 + | +116 | fn default() -> Self; + | ^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 2874ef6bd7685..25fc2d46d2560 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -323,6 +323,16 @@ pub mod pallet { pub type ConditionalNMap = StorageNMap<_, (storage::Key, storage::Key), u32>; + #[pallet::storage] + #[pallet::storage_prefix = "RenamedCountedMap"] + #[pallet::getter(fn counted_storage_map)] + pub type 
SomeCountedStorageMap = + CountedStorageMap; + + #[pallet::storage] + #[pallet::unbounded] + pub type Unbounded = StorageValue>; + #[pallet::genesis_config] #[derive(Default)] pub struct GenesisConfig { @@ -416,6 +426,7 @@ pub mod pallet { } // Test that a pallet with non generic event and generic genesis_config is correctly handled +// and that a pallet without the attribute generate_storage_info is correctly handled. #[frame_support::pallet] pub mod pallet2 { use super::{SomeAssociation1, SomeType1}; @@ -446,6 +457,10 @@ pub mod pallet2 { #[pallet::storage] pub type SomeValue = StorageValue<_, Vec>; + #[pallet::storage] + pub type SomeCountedStorageMap = + CountedStorageMap; + #[pallet::event] pub enum Event { /// Something @@ -542,7 +557,7 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Pallet, Call, Event}, + System: frame_system::{Call, Event}, Example: pallet::{Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, Example2: pallet2::{Pallet, Call, Event, Config, Storage}, } @@ -899,6 +914,17 @@ fn storage_expand() { pallet::ConditionalDoubleMap::::insert(1, 2, 3); pallet::ConditionalNMap::::insert((1, 2), 3); } + + pallet::SomeCountedStorageMap::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"RenamedCountedMap")].concat(); + k.extend(1u8.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + let k = [twox_128(b"Example"), twox_128(b"CounterForRenamedCountedMap")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + pallet::Unbounded::::put(vec![1, 2]); + let k = [twox_128(b"Example"), twox_128(b"Unbounded")].concat(); + assert_eq!(unhashed::get::>(&k), Some(vec![1, 2])); }) } @@ -982,52 +1008,6 @@ fn metadata() { use frame_support::metadata::*; let pallets = vec![ - PalletMetadata { - index: 0, - name: "System", - storage: None, - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - 
constants: vec![ - PalletConstantMetadata { - name: "BlockWeights", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "BlockLength", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "BlockHashCount", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "DbWeight", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "Version", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "SS58Prefix", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - ], - error: Some(meta_type::>().into()), - }, PalletMetadata { index: 1, name: "Example", @@ -1180,193 +1160,28 @@ fn metadata() { default: vec![0], docs: vec![], }, - ], - }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![ - PalletConstantMetadata { - name: "MyGetParam", - ty: meta_type::(), - value: vec![10, 0, 0, 0], - docs: vec![" Some comment", " Some comment"], - }, - PalletConstantMetadata { - name: "MyGetParam2", - ty: meta_type::(), - value: vec![11, 0, 0, 0], - docs: vec![" Some comment", " Some comment"], - }, - PalletConstantMetadata { - name: "MyGetParam3", - ty: meta_type::(), - value: vec![12, 0, 0, 0, 0, 0, 0, 0], - docs: vec![], - }, - PalletConstantMetadata { - name: "some_extra", - ty: meta_type::(), - value: vec![100, 0, 0, 0, 0, 0, 0, 0], - docs: vec![" Some doc", " Some doc"], - }, - PalletConstantMetadata { - name: "some_extra_extra", - ty: meta_type::(), - value: vec![0, 0, 0, 0, 0, 0, 0, 0], - docs: vec![" Some doc"], - }, - ], - error: Some(PalletErrorMetadata { ty: meta_type::>() }), - }, - PalletMetadata { - index: 1, - name: "Example", - storage: Some(PalletStorageMetadata { - prefix: "Example", - entries: vec![ - StorageEntryMetadata { - name: "ValueWhereClause", - modifier: StorageEntryModifier::Optional, - ty: 
StorageEntryType::Plain(meta_type::()), - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "Value", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::()), - default: vec![0], - docs: vec![], - }, StorageEntryMetadata { - name: "Value2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::()), - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "Map", - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - key: meta_type::(), - value: meta_type::(), - hashers: vec![StorageHasher::Blake2_128Concat], - }, - default: vec![4, 0], - docs: vec![], - }, - StorageEntryMetadata { - name: "Map2", + name: "RenamedCountedMap", modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - key: meta_type::(), - value: meta_type::(), hashers: vec![StorageHasher::Twox64Concat], - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "DoubleMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: meta_type::(), - key: meta_type::<(u8, u16)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "DoubleMap2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: meta_type::(), - key: meta_type::<(u16, u32)>(), - hashers: vec![ - StorageHasher::Twox64Concat, - StorageHasher::Blake2_128Concat, - ], - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "NMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { key: meta_type::(), - hashers: vec![StorageHasher::Blake2_128Concat], value: meta_type::(), }, default: vec![0], docs: vec![], }, StorageEntryMetadata { - name: "NMap2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::<(u16, u32)>(), - hashers: vec![ - 
StorageHasher::Twox64Concat, - StorageHasher::Blake2_128Concat, - ], - value: meta_type::(), - }, - default: vec![0], - docs: vec![], - }, - #[cfg(feature = "conditional-storage")] - StorageEntryMetadata { - name: "ConditionalValue", - modifier: StorageEntryModifier::Optional, + name: "CounterForRenamedCountedMap", + modifier: StorageEntryModifier::Default, ty: StorageEntryType::Plain(meta_type::()), - default: vec![0], - docs: vec![], - }, - #[cfg(feature = "conditional-storage")] - StorageEntryMetadata { - name: "ConditionalMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::(), - value: meta_type::(), - hashers: vec![StorageHasher::Twox64Concat], - }, - default: vec![0], - docs: vec![], - }, - #[cfg(feature = "conditional-storage")] - StorageEntryMetadata { - name: "ConditionalDoubleMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: meta_type::(), - key: meta_type::<(u8, u16)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - }, - default: vec![0], - docs: vec![], + default: vec![0, 0, 0, 0], + docs: vec!["Counter for the related counted storage map"], }, - #[cfg(feature = "conditional-storage")] StorageEntryMetadata { - name: "ConditionalNMap", + name: "Unbounded", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::<(u8, u16)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - value: meta_type::(), - }, + ty: StorageEntryType::Plain(meta_type::>()), default: vec![0], docs: vec![], }, @@ -1413,13 +1228,33 @@ fn metadata() { name: "Example2", storage: Some(PalletStorageMetadata { prefix: "Example2", - entries: vec![StorageEntryMetadata { - name: "SomeValue", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::>()), - default: vec![0], - docs: vec![], - }], + entries: vec![ + StorageEntryMetadata { + name: "SomeValue", + 
modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::>()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "SomeCountedStorageMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Twox64Concat], + key: meta_type::(), + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "CounterForSomeCountedStorageMap", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec!["Counter for the related counted storage map"], + }, + ], }), calls: Some(meta_type::>().into()), event: Some(PalletEventMetadata { ty: meta_type::() }), @@ -1450,7 +1285,7 @@ fn metadata() { _ => panic!("metadata has been bumped, test needs to be updated"), }; - pretty_assertions::assert_eq!(actual_metadata.pallets[1], expected_metadata.pallets[1]); + pretty_assertions::assert_eq!(actual_metadata.pallets, expected_metadata.pallets); } #[test] @@ -1577,17 +1412,54 @@ fn test_storage_info() { max_size: Some(7 + 16 + 8), } }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"RenamedCountedMap".to_vec(), + prefix: prefix(b"Example", b"RenamedCountedMap").to_vec(), + max_values: None, + max_size: Some(1 + 4 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CounterForRenamedCountedMap".to_vec(), + prefix: prefix(b"Example", b"CounterForRenamedCountedMap").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Unbounded".to_vec(), + prefix: prefix(b"Example", b"Unbounded").to_vec(), + max_values: Some(1), + max_size: None, + }, ], ); assert_eq!( Example2::storage_info(), - vec![StorageInfo { - pallet_name: b"Example2".to_vec(), - storage_name: b"SomeValue".to_vec(), - prefix: prefix(b"Example2", b"SomeValue").to_vec(), - max_values: Some(1), - max_size: None, - 
},], + vec![ + StorageInfo { + pallet_name: b"Example2".to_vec(), + storage_name: b"SomeValue".to_vec(), + prefix: prefix(b"Example2", b"SomeValue").to_vec(), + max_values: Some(1), + max_size: None, + }, + StorageInfo { + pallet_name: b"Example2".to_vec(), + storage_name: b"SomeCountedStorageMap".to_vec(), + prefix: prefix(b"Example2", b"SomeCountedStorageMap").to_vec(), + max_values: None, + max_size: None, + }, + StorageInfo { + pallet_name: b"Example2".to_vec(), + storage_name: b"CounterForSomeCountedStorageMap".to_vec(), + prefix: prefix(b"Example2", b"CounterForSomeCountedStorageMap").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + ], ); } diff --git a/frame/support/test/tests/pallet_ui.rs b/frame/support/test/tests/pallet_ui.rs index e5f4a54dfb000..6f56c1efd6d73 100644 --- a/frame/support/test/tests/pallet_ui.rs +++ b/frame/support/test/tests/pallet_ui.rs @@ -16,6 +16,7 @@ // limitations under the License. #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] #[test] fn pallet_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index d1b040c16091f..3d1ea1adc9862 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -9,12 +9,16 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound.rs:20:36 - | -20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `Clone` is not implemented for `::Bar` - | - = note: required by `clone` + --> $DIR/call_argument_invalid_bound.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `::Bar` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/call_argument_invalid_bound.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 84d4863672957..c9ff843103b3b 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -9,12 +9,16 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound_2.rs:20:36 - | -20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `Clone` is not implemented for `::Bar` - | - = note: required by 
`clone` + --> $DIR/call_argument_invalid_bound_2.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `::Bar` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/call_argument_invalid_bound_2.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 73513907e85f3..144b7e12bd664 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -5,17 +5,21 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` | ^^^ `Bar` cannot be formatted using `{:?}` | = help: the trait `std::fmt::Debug` is not implemented for `Bar` - = note: add `#[derive(Debug)]` or manually implement `std::fmt::Debug` + = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound_3.rs:22:36 - | -22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `Clone` is not implemented for `Bar` - | - = note: required by `clone` + --> $DIR/call_argument_invalid_bound_3.rs:22:36 + | +22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `Bar` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type `&Bar` --> 
$DIR/call_argument_invalid_bound_3.rs:22:36 diff --git a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs index d103fa09d991b..5e99c84050c95 100644 --- a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs +++ b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs @@ -1,6 +1,6 @@ #[frame_support::pallet] mod pallet { - use frame_support::pallet_prelude::StorageValue; + use frame_support::pallet_prelude::*; #[pallet::config] pub trait Config: frame_system::Config {} @@ -12,9 +12,15 @@ mod pallet { #[pallet::storage] type Foo = StorageValue<_, u8>; - #[pallet::storage] - #[pallet::storage_prefix = "Foo"] - type NotFoo = StorageValue<_, u16>; + #[pallet::storage] + #[pallet::storage_prefix = "Foo"] + type NotFoo = StorageValue<_, u16>; + + #[pallet::storage] + type CounterForBar = StorageValue<_, u16>; + + #[pallet::storage] + type Bar = CountedStorageMap<_, Twox64Concat, u16, u16>; } fn main() { diff --git a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr index 63a6e71e44045..716888c9d8b65 100644 --- a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr +++ b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr @@ -1,17 +1,47 @@ error: Duplicate storage prefixes found for `Foo` - --> $DIR/duplicate_storage_prefix.rs:16:32 + --> $DIR/duplicate_storage_prefix.rs:16:29 | 16 | #[pallet::storage_prefix = "Foo"] | ^^^^^ +error: Duplicate storage prefixes found for `Foo` + --> $DIR/duplicate_storage_prefix.rs:13:7 + | +13 | type Foo = StorageValue<_, u8>; + | ^^^ + +error: Duplicate storage prefixes found for `CounterForBar`, used for counter associated to counted storage map + --> $DIR/duplicate_storage_prefix.rs:23:7 + | +23 | type Bar = CountedStorageMap<_, Twox64Concat, u16, u16>; + | ^^^ + +error: Duplicate storage prefixes found for `CounterForBar` + --> 
$DIR/duplicate_storage_prefix.rs:20:7 + | +20 | type CounterForBar = StorageValue<_, u16>; + | ^^^^^^^^^^^^^ + error[E0412]: cannot find type `_GeneratedPrefixForStorageFoo` in this scope --> $DIR/duplicate_storage_prefix.rs:13:7 | 13 | type Foo = StorageValue<_, u8>; | ^^^ not found in this scope -error[E0121]: the type placeholder `_` is not allowed within types on item signatures - --> $DIR/duplicate_storage_prefix.rs:17:35 +error[E0412]: cannot find type `_GeneratedPrefixForStorageNotFoo` in this scope + --> $DIR/duplicate_storage_prefix.rs:17:7 | 17 | type NotFoo = StorageValue<_, u16>; - | ^ not allowed in type signatures + | ^^^^^^ not found in this scope + +error[E0412]: cannot find type `_GeneratedPrefixForStorageCounterForBar` in this scope + --> $DIR/duplicate_storage_prefix.rs:20:7 + | +20 | type CounterForBar = StorageValue<_, u16>; + | ^^^^^^^^^^^^^ not found in this scope + +error[E0412]: cannot find type `_GeneratedPrefixForStorageBar` in this scope + --> $DIR/duplicate_storage_prefix.rs:23:7 + | +23 | type Bar = CountedStorageMap<_, Twox64Concat, u16, u16>; + | ^^^ not found in this scope diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index d48012a6c952d..bf4c05bb4e5b5 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -1,10 +1,14 @@ error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/event_field_not_member.rs:23:7 - | -23 | B { b: T::Bar }, - | ^ the trait `Clone` is not implemented for `::Bar` - | - = note: required by `clone` + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ the trait `Clone` is not implemented for `::Bar` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type 
`&::Bar` --> $DIR/event_field_not_member.rs:23:7 diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr index dd96c700ce7e5..e3126ad6a85dc 100644 --- a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr @@ -4,4 +4,4 @@ error: Invalid usage of Event, `Config` contains no associated type `Event`, but 1 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index 4bc3cfdcbf9b7..ad8300b8d89b8 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -6,5 +6,5 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is | ::: $WORKSPACE/frame/support/src/traits/hooks.rs | - | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { | ------- required by this bound in `GenesisBuild` diff --git a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr index f451f7b16aee5..f57b4a61c80c5 100644 --- a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr @@ -10,4 +10,4 @@ error: expected `<` 1 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in an attribute macro (in Nightly builds, run with -Z 
macro-backtrace for more info) + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index 3d7303fafdcf5..ecb57bec37a7b 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -2,14 +2,14 @@ error[E0107]: missing generics for trait `Hooks` --> $DIR/hooks_invalid_item.rs:12:18 | 12 | impl Hooks for Pallet {} - | ^^^^^ expected 1 type argument + | ^^^^^ expected 1 generic argument | -note: trait defined here, with 1 type parameter: `BlockNumber` +note: trait defined here, with 1 generic parameter: `BlockNumber` --> $DIR/hooks.rs:214:11 | 214 | pub trait Hooks { | ^^^^^ ----------- -help: use angle brackets to add missing type argument +help: add missing generic argument | 12 | impl Hooks for Pallet {} - | ^^^^^^^^^^^^^ + | ^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index e78eb7ff9537b..cd3032c49735a 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -1,77 +1,105 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 - | -20 | #[pallet::storage] - | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` - = note: required because of the requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required 
by `NAME` + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` 
for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `NAME` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `NAME` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the 
requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `NAME` - -error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index d9a7ddbf3443e..3d03af836986a 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -1,77 +1,105 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 - | -20 | #[pallet::storage] - | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` - = note: required because of the requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `NAME` + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, 
entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeEncode` 
is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `NAME` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait 
`EncodeLike` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `NAME` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `frame_support::storage::StorageEntryMetadata` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `NAME` - -error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` - | - = note: required because of 
the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index ad415911bc933..0ffb015e36bca 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -5,4 +5,8 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `storage_info` +note: required by `storage_info` + --> $DIR/storage.rs:71:2 + | +71 | fn storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 545520124bfee..2b70102fdac24 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -4,6 +4,10 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied 10 | #[pallet::generate_storage_info] | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | - = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `NMapKey` - = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` - = note: required by `storage_info` + = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` + = note: required because of the requirements on the impl of `StorageInfoTrait` for 
`frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` +note: required by `storage_info` + --> $DIR/storage.rs:71:2 + | +71 | fn storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr index bf93d99cf56bd..6313bd691f943 100644 --- a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr +++ b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -1,4 +1,4 @@ -error: expected `getter` or `storage_prefix` +error: expected one of: `getter`, `storage_prefix`, `unbounded` --> $DIR/storage_invalid_attribute.rs:16:12 | 16 | #[pallet::generate_store(pub trait Store)] diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr index 188eed3cb0d17..40f57f16e0df5 100644 --- a/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr +++ b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, multiple argument pallet::getter found +error: Invalid attribute: Duplicate attribute --> $DIR/storage_multiple_getters.rs:20:3 | 20 | #[pallet::getter(fn foo_error)] diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr index 9288d131d95af..52cb7e85adf21 100644 --- a/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr +++ b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, multiple argument pallet::storage_prefix found +error: Invalid attribute: Duplicate attribute --> $DIR/storage_multiple_renames.rs:20:3 | 20 | #[pallet::storage_prefix = "Baz"] diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.stderr 
b/frame/support/test/tests/reserved_keyword/on_initialize.stderr index 3df392dee9005..84e93fa52c2d9 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.stderr +++ b/frame/support/test/tests/reserved_keyword/on_initialize.stderr @@ -4,7 +4,7 @@ error: Invalid call fn name: `on_finalize`, name is reserved and doesn't match e 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `on_initialize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. --> $DIR/on_initialize.rs:28:1 @@ -12,7 +12,7 @@ error: Invalid call fn name: `on_initialize`, name is reserved and doesn't match 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `on_runtime_upgrade`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. 
--> $DIR/on_initialize.rs:28:1 @@ -20,7 +20,7 @@ error: Invalid call fn name: `on_runtime_upgrade`, name is reserved and doesn't 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `offchain_worker`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. --> $DIR/on_initialize.rs:28:1 @@ -28,7 +28,7 @@ error: Invalid call fn name: `offchain_worker`, name is reserved and doesn't mat 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `deposit_event`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. 
--> $DIR/on_initialize.rs:28:1 @@ -36,4 +36,4 @@ error: Invalid call fn name: `deposit_event`, name is reserved and doesn't match 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 97c19c5e8159a..c8a9d4eadfea0 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -16,8 +16,6 @@ // limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::{decl_event, decl_module}; -use frame_system as system; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -25,24 +23,24 @@ use sp_runtime::{ Perbill, }; +#[frame_support::pallet] mod module { - use super::*; + use frame_support::pallet_prelude::*; - pub trait Config: system::Config { - type Event: From + Into<::Event>; - } + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - pub fn deposit_event() = default; - } + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From + IsType<::Event>; } - decl_event!( - pub enum Event { - Complex(Vec, u32, u16, u128), - } - ); + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + Complex(Vec, u32, u16, u128), + } } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -55,7 +53,7 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - Module: module::{Pallet, Call, Event}, + Module: module::{Pallet, Event}, } ); @@ -70,7 +68,7 @@ frame_support::parameter_types! { 4 * 1024 * 1024, Perbill::from_percent(75), ); } -impl system::Config for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = BlockLength; @@ -101,14 +99,17 @@ impl module::Config for Runtime { } fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() } fn deposit_events(n: usize) { let mut t = new_test_ext(); t.execute_with(|| { for _ in 0..n { - module::Module::::deposit_event(module::Event::Complex( + module::Pallet::::deposit_event(module::Event::Complex( vec![1, 2, 3], 2, 3, diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index beb61829bce37..e7371b1099e5e 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{storage, traits::Get, 
weights::DispatchClass}; use frame_system::{Call, DigestItemOf, Pallet as System, RawOrigin}; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; @@ -140,6 +140,6 @@ benchmarks! { verify { assert_eq!(storage::unhashed::get_raw(&last_key), None); } -} -impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 081a0efa3db71..74be83398421e 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -34,7 +34,7 @@ use sp_std::vec; /// extension sets some kind of priority upon validating transactions. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct CheckNonce(#[codec(compact)] T::Index); +pub struct CheckNonce(#[codec(compact)] pub T::Index); impl CheckNonce { /// utility constructor. Used only in client/factory code. diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index 97ddd4cddd63f..98e05439df72b 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, TrackedStorageKey}; +use frame_benchmarking::{benchmarks, TrackedStorageKey}; use frame_support::{ensure, traits::OnFinalize}; use frame_system::RawOrigin; @@ -55,6 +55,6 @@ benchmarks! 
{ verify { ensure!(!DidUpdate::::exists(), "Time was not removed."); } -} -impl_benchmark_test_suite!(Timestamp, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Timestamp, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 5e08121855210..d8227332bb334 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -19,7 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::ensure; use frame_system::RawOrigin; use sp_runtime::traits::Saturating; @@ -190,6 +190,6 @@ benchmarks! { let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); ensure!(Tips::::contains_key(hash), "tip does not exist"); }: _(RawOrigin::Root, hash) -} -impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 546939692bbaf..1dfeb0c2baa4b 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = ] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } serde = { version = "1.0.126", optional = true } -smallvec = "1.4.1" +smallvec = "1.7.0" sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index e3a3bccc3d39a..11dbcc010f67c 100644 --- a/frame/transaction-payment/src/lib.rs +++ 
b/frame/transaction-payment/src/lib.rs @@ -332,12 +332,7 @@ pub mod pallet { .unwrap(), ); - // This is the minimum value of the multiplier. Make sure that if we collapse to this - // value, we can recover with a reasonable amount of traffic. For this test we assert - // that if we collapse to minimum, the trend will be positive with a weight value - // which is 1% more than the target. - let min_value = T::FeeMultiplierUpdate::min(); - let mut target = T::FeeMultiplierUpdate::target() * + let target = T::FeeMultiplierUpdate::target() * T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( "Setting `max_total` for `Normal` dispatch class is not compatible with \ `transaction-payment` pallet.", @@ -348,10 +343,17 @@ pub mod pallet { // this is most likely because in a test setup we set everything to (). return } - target += addition; #[cfg(any(feature = "std", test))] sp_io::TestExternalities::new_empty().execute_with(|| { + // This is the minimum value of the multiplier. Make sure that if we collapse to + // this value, we can recover with a reasonable amount of traffic. For this test we + // assert that if we collapse to minimum, the trend will be positive with a weight + // value which is 1% more than the target. 
+ let min_value = T::FeeMultiplierUpdate::min(); + + let target = target + addition; + >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); assert!( diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs index d5da6a42b46f0..6ca9b247f0228 100644 --- a/frame/transaction-storage/src/benchmarking.rs +++ b/frame/transaction-storage/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::traits::{Currency, OnFinalize, OnInitialize}; use frame_system::{EventRecord, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, One, Zero}; @@ -143,6 +143,6 @@ benchmarks! { verify { assert_last_event::(Event::ProofChecked.into()); } -} -impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index 2fe3c04e0229f..bc31199d90391 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -37,7 +37,7 @@ use sp_runtime::traits::{BlakeTwo256, Hash, One, Saturating, Zero}; use sp_std::{prelude::*, result}; use sp_transaction_storage_proof::{ encode_index, random_chunk, InherentError, TransactionStorageProof, CHUNK_SIZE, - DEFAULT_STORAGE_PERIOD, INHERENT_IDENTIFIER, + INHERENT_IDENTIFIER, }; /// A type alias for the balance type from this pallet's point of view. 
@@ -380,7 +380,7 @@ pub mod pallet { Self { byte_fee: 10u32.into(), entry_fee: 1000u32.into(), - storage_period: DEFAULT_STORAGE_PERIOD.into(), + storage_period: sp_transaction_storage_proof::DEFAULT_STORAGE_PERIOD.into(), max_block_transactions: DEFAULT_MAX_BLOCK_TRANSACTIONS, max_transaction_size: DEFAULT_MAX_TRANSACTION_SIZE, } diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 2fe0bad704f2b..8570b0efdb945 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::{Pallet as Treasury, *}; -use frame_benchmarking::{account, benchmarks_instance_pallet, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks_instance_pallet}; use frame_support::{ensure, traits::OnInitialize}; use frame_system::RawOrigin; @@ -94,6 +94,6 @@ benchmarks_instance_pallet! { }: { Treasury::::on_initialize(T::BlockNumber::zero()); } -} -impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/try-runtime/src/lib.rs b/frame/try-runtime/src/lib.rs index b2dfdfac6429e..754fc1d2a3303 100644 --- a/frame/try-runtime/src/lib.rs +++ b/frame/try-runtime/src/lib.rs @@ -32,6 +32,12 @@ sp_api::decl_runtime_apis! { /// /// Returns the consumed weight of the migration in case of a successful one, combined with /// the total allowed block weight of the runtime. - fn on_runtime_upgrade() -> Result<(Weight, Weight), sp_runtime::RuntimeString>; + fn on_runtime_upgrade() -> (Weight, Weight); + + /// Execute the given block, but don't check that its state root matches that of yours. + /// + /// This is only sensible where the incoming block is from a different network, yet it has + /// the same block format as the runtime implementing this API. 
+ fn execute_block_no_check(block: Block) -> Weight; } } diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 5c777dc961e9e..0e161bf7bfe85 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -21,8 +21,7 @@ use super::*; use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, - whitelisted_caller, + account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, }; use frame_support::{ dispatch::UnfilteredDispatchable, @@ -379,6 +378,6 @@ benchmarks_instance_pallet! { verify { assert_last_event::(Event::ApprovalCancelled(class, instance, caller, delegate).into()); } -} -impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index a878a4910f769..68acf7f1879fb 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -80,6 +80,41 @@ impl, I: 'static> Pallet { Ok(()) } + pub(super) fn do_destroy_class( + class: T::ClassId, + witness: DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Class::::try_mutate_exists(class, |maybe_details| { + let class_details = maybe_details.take().ok_or(Error::::Unknown)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(class_details.owner == check_owner, Error::::NoPermission); + } + ensure!(class_details.instances == witness.instances, Error::::BadWitness); + ensure!( + class_details.instance_metadatas == witness.instance_metadatas, + Error::::BadWitness + ); + ensure!(class_details.attributes == witness.attributes, Error::::BadWitness); + + for (instance, details) in Asset::::drain_prefix(&class) { + Account::::remove((&details.owner, &class, &instance)); + } + InstanceMetadataOf::::remove_prefix(&class, None); + ClassMetadataOf::::remove(&class); + 
Attribute::::remove_prefix((&class,), None); + T::Currency::unreserve(&class_details.owner, class_details.total_deposit); + + Self::deposit_event(Event::Destroyed(class)); + + Ok(DestroyWitness { + instances: class_details.instances, + instance_metadatas: class_details.instance_metadatas, + attributes: class_details.attributes, + }) + }) + } + pub(super) fn do_mint( class: T::ClassId, instance: T::InstanceId, diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index c5d5c6089f865..e68d2d4deecda 100644 --- a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -19,13 +19,10 @@ use super::*; use frame_support::{ - traits::{ - tokens::nonfungibles::{Create, Inspect, InspectEnumerable, Mutate, Transfer}, - Get, - }, + traits::{tokens::nonfungibles::*, Get}, BoundedSlice, }; -use sp_runtime::DispatchResult; +use sp_runtime::{DispatchError, DispatchResult}; use sp_std::convert::TryFrom; impl, I: 'static> Inspect<::AccountId> for Pallet { @@ -106,6 +103,22 @@ impl, I: 'static> Create<::AccountId> for Pallet } } +impl, I: 'static> Destroy<::AccountId> for Pallet { + type DestroyWitness = DestroyWitness; + + fn get_destroy_witness(class: &Self::ClassId) -> Option { + Class::::get(class).map(|a| a.destroy_witness()) + } + + fn destroy( + class: Self::ClassId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Self::do_destroy_class(class, witness, maybe_check_owner) + } +} + impl, I: 'static> Mutate<::AccountId> for Pallet { fn mint_into( class: &Self::ClassId, diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 8c716694051b5..1bf220e4a7876 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -381,37 +381,19 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] class: T::ClassId, witness: DestroyWitness, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { 
Ok(_) => None, Err(origin) => Some(ensure_signed(origin)?), }; - Class::::try_mutate_exists(class, |maybe_details| { - let class_details = maybe_details.take().ok_or(Error::::Unknown)?; - if let Some(check_owner) = maybe_check_owner { - ensure!(class_details.owner == check_owner, Error::::NoPermission); - } - ensure!(class_details.instances == witness.instances, Error::::BadWitness); - ensure!( - class_details.instance_metadatas == witness.instance_metadatas, - Error::::BadWitness - ); - ensure!(class_details.attributes == witness.attributes, Error::::BadWitness); - - for (instance, details) in Asset::::drain_prefix(&class) { - Account::::remove((&details.owner, &class, &instance)); - } - InstanceMetadataOf::::remove_prefix(&class, None); - ClassMetadataOf::::remove(&class); - Attribute::::remove_prefix((&class,), None); - T::Currency::unreserve(&class_details.owner, class_details.total_deposit); - - Self::deposit_event(Event::Destroyed(class)); - - // NOTE: could use postinfo to reflect the actual number of - // accounts/sufficient/approvals - Ok(()) - }) + let details = Self::do_destroy_class(class, witness, maybe_check_owner)?; + + Ok(Some(T::WeightInfo::destroy( + details.instances, + details.instance_metadatas, + details.attributes, + )) + .into()) } /// Mint an asset instance of a particular class. diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 210a6156499cf..70cc61f87b9c9 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; const SEED: u32 = 0; @@ -63,6 +63,6 @@ benchmarks! 
{ verify { assert_last_event::(Event::BatchCompleted.into()) } -} -impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 5cdc14c8fdaca..b52ddac3e8857 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -19,7 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::assert_ok; use frame_system::{Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; @@ -374,10 +374,10 @@ benchmarks! { T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath) ); } -} -impl_benchmark_test_suite!( - Vesting, - crate::mock::ExtBuilder::default().existential_deposit(256).build(), - crate::mock::Test, -); + impl_benchmark_test_suite!( + Vesting, + crate::mock::ExtBuilder::default().existential_deposit(256).build(), + crate::mock::Test, + ); +} diff --git a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr index b08f056b57d1c..bf201e8b55a78 100644 --- a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr @@ -4,4 +4,4 @@ error: No api implementation given! 17 | sp_api::impl_runtime_apis! 
{} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 9dd84c24b6781..2fb06c3565ea2 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -38,7 +38,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/impl_incorrect_method_signature.rs:17:1 @@ -52,7 +52,7 @@ error[E0308]: mismatched types 33 | | } | |_^ expected `u64`, found struct `std::string::String` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/impl_incorrect_method_signature.rs:19:11 diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index 
47cd9e01d910f..befe67c1d0b4a 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -10,4 +10,4 @@ error: `BlockId` needs to be taken by reference and not by value! 19 | | } | |_^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 7385fe4745989..1b1d2553940a5 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -36,7 +36,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait --> $DIR/mock_only_self_reference.rs:12:1 @@ -64,4 +64,4 @@ error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for t | = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error 
originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index a0a16c4a493db..063cbff60f81e 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -38,7 +38,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/type_reference_in_impl_runtime_apis_call.rs:17:1 @@ -52,7 +52,7 @@ error[E0308]: mismatched types 35 | | } | |_^ expected `u64`, found `&u64` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/type_reference_in_impl_runtime_apis_call.rs:19:11 diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml new file mode 100644 index 0000000000000..633ac0e8fbcd1 --- /dev/null +++ b/primitives/beefy/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "beefy-primitives" +version = "4.0.0-dev" +authors = ["Parity Technologies "] 
+edition = "2018" +license = "Apache-2.0" + +[dependencies] +codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } + +sp-api = { version = "4.0.0-dev", path = "../api", default-features = false } +sp-application-crypto = { version = "4.0.0-dev", path = "../application-crypto", default-features = false } +sp-core = { version = "4.0.0-dev", path = "../core", default-features = false } +sp-runtime = { version = "4.0.0-dev", path = "../runtime", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../std", default-features = false } + +[dev-dependencies] +hex-literal = "0.3" + +sp-keystore = { version = "0.10.0-dev", path = "../keystore" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-api/std", + "sp-application-crypto/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs new file mode 100644 index 0000000000000..7aab93bbcb973 --- /dev/null +++ b/primitives/beefy/src/commitment.rs @@ -0,0 +1,264 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use sp_std::{cmp, prelude::*}; + +use crate::{crypto::Signature, ValidatorSetId}; + +/// A commitment signed by GRANDPA validators as part of BEEFY protocol. +/// +/// The commitment contains a [payload] extracted from the finalized block at height [block_number]. +/// GRANDPA validators collect signatures on commitments and a stream of such signed commitments +/// (see [SignedCommitment]) forms the BEEFY protocol. +#[derive(Clone, Debug, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct Commitment { + /// The payload being signed. + /// + /// This should be some form of cumulative representation of the chain (think MMR root hash). + /// The payload should also contain some details that allow the light client to verify next + /// validator set. The protocol does not enforce any particular format of this data, + /// nor how often it should be present in commitments, however the light client has to be + /// provided with full validator set whenever it performs the transition (i.e. importing first + /// block with [validator_set_id] incremented). + pub payload: TPayload, + + /// Finalized block number this commitment is for. + /// + /// GRANDPA validators agree on a block they create a commitment for and start collecting + /// signatures. This process is called a round. + /// There might be multiple rounds in progress (depending on the block choice rule), however + /// since the payload is supposed to be cumulative, it is not required to import all + /// commitments. + /// BEEFY light client is expected to import at least one commitment per epoch, + /// but is free to import as many as it requires. + pub block_number: TBlockNumber, + + /// BEEFY validator set supposed to sign this commitment. + /// + /// Validator set is changing once per epoch. The Light Client must be provided with details + /// about the validator set whenever it's importing first commitment with a new + /// `validator_set_id`. 
Validator set data MUST be verifiable, for instance using [payload] + /// information. + pub validator_set_id: ValidatorSetId, +} + +impl cmp::PartialOrd for Commitment +where + TBlockNumber: cmp::Ord, + TPayload: cmp::Eq, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for Commitment +where + TBlockNumber: cmp::Ord, + TPayload: cmp::Eq, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.validator_set_id + .cmp(&other.validator_set_id) + .then_with(|| self.block_number.cmp(&other.block_number)) + } +} + +/// A commitment with matching GRANDPA validators' signatures. +#[derive(Clone, Debug, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct SignedCommitment { + /// The commitment signatures are collected for. + pub commitment: Commitment, + /// GRANDPA validators' signatures for the commitment. + /// + /// The length of this `Vec` must match number of validators in the current set (see + /// [Commitment::validator_set_id]). + pub signatures: Vec>, +} + +impl SignedCommitment { + /// Return the number of collected signatures. + pub fn no_of_signatures(&self) -> usize { + self.signatures.iter().filter(|x| x.is_some()).count() + } +} + +/// A [SignedCommitment] with a version number. This variant will be appended +/// to the block justifications for the block for which the signed commitment +/// has been generated. 
+#[derive(Clone, Debug, PartialEq, codec::Encode, codec::Decode)] +pub enum VersionedCommitment { + #[codec(index = 1)] + /// Current active version + V1(SignedCommitment), +} + +#[cfg(test)] +mod tests { + + use sp_core::{keccak_256, Pair}; + use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; + + use super::*; + use codec::Decode; + + use crate::{crypto, KEY_TYPE}; + + type TestCommitment = Commitment; + type TestSignedCommitment = SignedCommitment; + type TestVersionedCommitment = VersionedCommitment; + + // The mock signatures are equivalent to the ones produced by the BEEFY keystore + fn mock_signatures() -> (crypto::Signature, crypto::Signature) { + let store: SyncCryptoStorePtr = KeyStore::new().into(); + + let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); + let _ = + SyncCryptoStore::insert_unknown(&*store, KEY_TYPE, "//Alice", alice.public().as_ref()) + .unwrap(); + + let msg = keccak_256(b"This is the first message"); + let sig1 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) + .unwrap() + .unwrap(); + + let msg = keccak_256(b"This is the second message"); + let sig2 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) + .unwrap() + .unwrap(); + + (sig1.into(), sig2.into()) + } + + #[test] + fn commitment_encode_decode() { + // given + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + // when + let encoded = codec::Encode::encode(&commitment); + let decoded = TestCommitment::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(commitment)); + assert_eq!( + encoded, + hex_literal::hex!( + "3048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000" + ) + ); + } + + #[test] + fn signed_commitment_encode_decode() { + // given + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let 
sigs = mock_signatures(); + + let signed = SignedCommitment { + commitment, + signatures: vec![None, None, Some(sigs.0), Some(sigs.1)], + }; + + // when + let encoded = codec::Encode::encode(&signed); + let decoded = TestSignedCommitment::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(signed)); + assert_eq!( + encoded, + hex_literal::hex!( + "3048656c6c6f20576f726c64210500000000000000000000000000000000000000000000001000 + 0001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d + 10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a86cdd9aaacdc496577f3db8c55ef9e6fd48f2c5c05a + 2274707491635d8ba3df64f324575b7b2a34487bca2324b6a0046395a71681be3d0c2a00" + ) + ); + } + + #[test] + fn signed_commitment_count_signatures() { + // given + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let sigs = mock_signatures(); + + let mut signed = SignedCommitment { + commitment, + signatures: vec![None, None, Some(sigs.0), Some(sigs.1)], + }; + assert_eq!(signed.no_of_signatures(), 2); + + // when + signed.signatures[2] = None; + + // then + assert_eq!(signed.no_of_signatures(), 1); + } + + #[test] + fn commitment_ordering() { + fn commitment( + block_number: u128, + validator_set_id: crate::ValidatorSetId, + ) -> TestCommitment { + Commitment { payload: "Hello World!".into(), block_number, validator_set_id } + } + + // given + let a = commitment(1, 0); + let b = commitment(2, 1); + let c = commitment(10, 0); + let d = commitment(10, 1); + + // then + assert!(a < b); + assert!(a < c); + assert!(c < b); + assert!(c < d); + assert!(b < d); + } + + #[test] + fn versioned_commitment_encode_decode() { + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let sigs = mock_signatures(); + + let signed = SignedCommitment { + commitment, + signatures: vec![None, None, Some(sigs.0), 
Some(sigs.1)], + }; + + let versioned = TestVersionedCommitment::V1(signed.clone()); + + let encoded = codec::Encode::encode(&versioned); + + assert_eq!(1, encoded[0]); + assert_eq!(encoded[1..], codec::Encode::encode(&signed)); + + let decoded = TestVersionedCommitment::decode(&mut &*encoded); + + assert_eq!(decoded, Ok(versioned)); + } +} diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs new file mode 100644 index 0000000000000..790b915ab98db --- /dev/null +++ b/primitives/beefy/src/lib.rs @@ -0,0 +1,137 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +//! Primitives for BEEFY protocol. +//! +//! The crate contains shared data types used by BEEFY protocol and documentation (in a form of +//! code) for building a BEEFY light client. +//! +//! BEEFY is a gadget that runs alongside another finality gadget (for instance GRANDPA). +//! For simplicity (and the initially intended use case) the documentation says GRANDPA in places +//! where a more abstract "Finality Gadget" term could be used, but there is no reason why BEEFY +//! wouldn't run with some other finality scheme. +//! BEEFY validator set is supposed to be tracking the Finality Gadget validator set, but note that +//! it will use a different set of keys. 
For Polkadot use case we plan to use `secp256k1` for BEEFY, +//! while GRANDPA uses `ed25519`. + +mod commitment; +pub mod mmr; +pub mod witness; + +pub use commitment::{Commitment, SignedCommitment, VersionedCommitment}; + +use codec::{Codec, Decode, Encode}; +use scale_info::TypeInfo; +use sp_core::H256; +use sp_std::prelude::*; + +/// Key type for BEEFY module. +pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"beef"); + +/// BEEFY cryptographic types +/// +/// This module basically introduces three crypto types: +/// - `crypto::Pair` +/// - `crypto::Public` +/// - `crypto::Signature` +/// +/// Your code should use the above types as concrete types for all crypto related +/// functionality. +/// +/// The current underlying crypto scheme used is ECDSA. This can be changed, +/// without affecting code restricted against the above listed crypto types. +pub mod crypto { + use sp_application_crypto::{app_crypto, ecdsa}; + app_crypto!(ecdsa, crate::KEY_TYPE); + + /// Identity of a BEEFY authority using ECDSA as its crypto. + pub type AuthorityId = Public; + + /// Signature for a BEEFY authority using ECDSA as its crypto. + pub type AuthoritySignature = Signature; +} + +/// The `ConsensusEngineId` of BEEFY. +pub const BEEFY_ENGINE_ID: sp_runtime::ConsensusEngineId = *b"BEEF"; + +/// Authority set id starts with zero at genesis +pub const GENESIS_AUTHORITY_SET_ID: u64 = 0; + +/// A typedef for validator set id. +pub type ValidatorSetId = u64; + +/// A set of BEEFY authorities, a.k.a. validators. +#[derive(Decode, Encode, Debug, PartialEq, Clone, TypeInfo)] +pub struct ValidatorSet { + /// Public keys of the validator set elements + pub validators: Vec, + /// Identifier of the validator set + pub id: ValidatorSetId, +} + +impl ValidatorSet { + /// Return an empty validator set with id of 0. + pub fn empty() -> Self { + Self { validators: Default::default(), id: Default::default() } + } +} + +/// The index of an authority. 
+pub type AuthorityIndex = u32; + +/// The type used to represent an MMR root hash. +pub type MmrRootHash = H256; + +/// A consensus log item for BEEFY. +#[derive(Decode, Encode, TypeInfo)] +pub enum ConsensusLog { + /// The authorities have changed. + #[codec(index = 1)] + AuthoritiesChange(ValidatorSet), + /// Disable the authority with given index. + #[codec(index = 2)] + OnDisabled(AuthorityIndex), + /// MMR root hash. + #[codec(index = 3)] + MmrRoot(MmrRootHash), +} + +/// BEEFY vote message. +/// +/// A vote message is a direct vote created by a BEEFY node on every voting round +/// and is gossiped to its peers. +#[derive(Debug, Decode, Encode, TypeInfo)] +pub struct VoteMessage { + /// Commit to information extracted from a finalized block + pub commitment: Commitment, + /// Node authority id + pub id: Id, + /// Node signature + pub signature: Signature, +} + +sp_api::decl_runtime_apis! { + /// API necessary for BEEFY voters. + pub trait BeefyApi + { + /// Return the current active BEEFY validator set + fn validator_set() -> ValidatorSet; + } +} diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs new file mode 100644 index 0000000000000..e428c0ea01215 --- /dev/null +++ b/primitives/beefy/src/mmr.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! BEEFY + MMR utilities. +//! 
While BEEFY can be used completely independently as an additional consensus gadget, +//! it is designed around a main use case of making bridging standalone networks together. +//! For that use case it's common to use some aggregated data structure (like MMR) to be +//! used in conjunction with BEEFY, to be able to efficiently prove any past blockchain data. +//! +//! This module contains primitives used by Polkadot implementation of the BEEFY+MMR bridge, +//! but we imagine they will be useful for other chains that either want to bridge with Polkadot +//! or are completely standalone, but heavily inspired by Polkadot. + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; + +/// A standard leaf that gets added every block to the MMR constructed by Substrate's `pallet_mmr`. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct MmrLeaf { + /// Version of the leaf format. + /// + /// Can be used to enable future format migrations and compatibility. + /// See [`MmrLeafVersion`] documentation for details. + pub version: MmrLeafVersion, + /// Current block parent number and hash. + pub parent_number_and_hash: (BlockNumber, Hash), + /// A merkle root of the next BEEFY authority set. + pub beefy_next_authority_set: BeefyNextAuthoritySet, + /// A merkle root of all registered parachain heads. + pub parachain_heads: MerkleRoot, +} + +/// An MMR leaf versioning scheme. +/// +/// Version is a single byte that consists of two components: +/// - `major` - 3 bits +/// - `minor` - 5 bits +/// +/// Any change in encoding that adds new items to the structure is considered non-breaking, hence +/// only requires an update of `minor` version. Any backward incompatible change (i.e. decoding to a +/// previous leaf format fails) should be indicated with `major` version bump. +/// +/// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be +/// still decoded, the new fields will simply be ignored). 
We expect the major version to be bumped +/// very rarely (hopefuly never). +#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode)] +pub struct MmrLeafVersion(u8); +impl MmrLeafVersion { + /// Create new version object from `major` and `minor` components. + /// + /// Panics if any of the component occupies more than 4 bits. + pub fn new(major: u8, minor: u8) -> Self { + if major > 0b111 || minor > 0b11111 { + panic!("Version components are too big."); + } + let version = (major << 5) + minor; + Self(version) + } + + /// Split the version into `major` and `minor` sub-components. + pub fn split(&self) -> (u8, u8) { + let major = self.0 >> 5; + let minor = self.0 & 0b11111; + (major, minor) + } +} + +/// Details of the next BEEFY authority set. +#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] +pub struct BeefyNextAuthoritySet { + /// Id of the next set. + /// + /// Id is required to correlate BEEFY signed commitments with the validator set. + /// Light Client can easily verify that the commitment witness it is getting is + /// produced by the latest validator set. + pub id: crate::ValidatorSetId, + /// Number of validators in the set. + /// + /// Some BEEFY Light Clients may use an interactive protocol to verify only subset + /// of signatures. We put set length here, so that these clients can verify the minimal + /// number of required signatures. + pub len: u32, + /// Merkle Root Hash build from BEEFY AuthorityIds. + /// + /// This is used by Light Clients to confirm that the commitments are signed by the correct + /// validator set. Light Clients using interactive protocol, might verify only subset of + /// signatures, hence don't require the full list here (will receive inclusion proofs). 
+ pub root: MerkleRoot, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_construct_version_correctly() { + let tests = vec![(0, 0, 0b00000000), (7, 2, 0b11100010), (7, 31, 0b11111111)]; + + for (major, minor, version) in tests { + let v = MmrLeafVersion::new(major, minor); + assert_eq!(v.encode(), vec![version], "Encoding does not match."); + assert_eq!(v.split(), (major, minor)); + } + } + + #[test] + #[should_panic] + fn should_panic_if_major_too_large() { + MmrLeafVersion::new(8, 0); + } + + #[test] + #[should_panic] + fn should_panic_if_minor_too_large() { + MmrLeafVersion::new(0, 32); + } +} diff --git a/primitives/beefy/src/witness.rs b/primitives/beefy/src/witness.rs new file mode 100644 index 0000000000000..c28a464e72df5 --- /dev/null +++ b/primitives/beefy/src/witness.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives for light, 2-phase interactive verification protocol. +//! +//! Instead of submitting full list of signatures, it's possible to submit first a witness +//! form of [SignedCommitment]. +//! This can later be verified by the client requesting only some (out of all) signatures for +//! verification. This allows lowering the data and computation cost of verifying the +//! signed commitment. 
+ +use sp_std::prelude::*; + +use crate::{ + commitment::{Commitment, SignedCommitment}, + crypto::Signature, +}; + +/// A light form of [SignedCommitment]. +/// +/// This is a light ("witness") form of the signed commitment. Instead of containing full list of +/// signatures, which might be heavy and expensive to verify, it only contains a bit vector of +/// validators which signed the original [SignedCommitment] and a merkle root of all signatures. +/// +/// This can be used by light clients for 2-phase interactive verification (for instance for +/// Ethereum Mainnet), in a commit-reveal like scheme, where first we submit only the signed +/// commitment witness and later on, the client picks only some signatures to verify at random. +#[derive(Debug, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct SignedCommitmentWitness { + /// The full content of the commitment. + pub commitment: Commitment, + + /// The bit vector of validators who signed the commitment. + pub signed_by: Vec, // TODO [ToDr] Consider replacing with bitvec crate + + /// A merkle root of signatures in the original signed commitment. + pub signatures_merkle_root: TMerkleRoot, +} + +impl + SignedCommitmentWitness +{ + /// Convert [SignedCommitment] into [SignedCommitmentWitness]. + /// + /// This takes a [SignedCommitment], which contains full signatures + /// and converts it into a witness form, which does not contain full signatures, + /// only a bit vector indicating which validators have signed the original [SignedCommitment] + /// and a merkle root of all signatures. + /// + /// Returns the full list of signatures along with the witness. 
+ pub fn from_signed( + signed: SignedCommitment, + merkelize: TMerkelize, + ) -> (Self, Vec>) + where + TMerkelize: FnOnce(&[Option]) -> TMerkleRoot, + { + let SignedCommitment { commitment, signatures } = signed; + let signed_by = signatures.iter().map(|s| s.is_some()).collect(); + let signatures_merkle_root = merkelize(&signatures); + + (Self { commitment, signed_by, signatures_merkle_root }, signatures) + } +} + +#[cfg(test)] +mod tests { + + use sp_core::{keccak_256, Pair}; + use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; + + use super::*; + use codec::Decode; + + use crate::{crypto, KEY_TYPE}; + + type TestCommitment = Commitment; + type TestSignedCommitment = SignedCommitment; + type TestSignedCommitmentWitness = + SignedCommitmentWitness>>; + + // The mock signatures are equivalent to the ones produced by the BEEFY keystore + fn mock_signatures() -> (crypto::Signature, crypto::Signature) { + let store: SyncCryptoStorePtr = KeyStore::new().into(); + + let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); + let _ = + SyncCryptoStore::insert_unknown(&*store, KEY_TYPE, "//Alice", alice.public().as_ref()) + .unwrap(); + + let msg = keccak_256(b"This is the first message"); + let sig1 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) + .unwrap() + .unwrap(); + + let msg = keccak_256(b"This is the second message"); + let sig2 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) + .unwrap() + .unwrap(); + + (sig1.into(), sig2.into()) + } + + fn signed_commitment() -> TestSignedCommitment { + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let sigs = mock_signatures(); + + SignedCommitment { commitment, signatures: vec![None, None, Some(sigs.0), Some(sigs.1)] } + } + + #[test] + fn should_convert_signed_commitment_to_witness() { + // given + let signed = signed_commitment(); + + // 
when + let (witness, signatures) = + TestSignedCommitmentWitness::from_signed(signed, |sigs| sigs.to_vec()); + + // then + assert_eq!(witness.signatures_merkle_root, signatures); + } + + #[test] + fn should_encode_and_decode_witness() { + // given + let signed = signed_commitment(); + let (witness, _) = TestSignedCommitmentWitness::from_signed(signed, |sigs| sigs.to_vec()); + + // when + let encoded = codec::Encode::encode(&witness); + let decoded = TestSignedCommitmentWitness::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(witness)); + assert_eq!( + encoded, + hex_literal::hex!( + "3048656c6c6f20576f726c64210500000000000000000000000000000000000000000000001000 + 00010110000001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e9 + 9a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a86cdd9aaacdc496577f3db8c55ef9e6fd + 48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bca2324b6a0046395a71681be3d0c2a00" + ) + ); + } +} diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 470a028021ca1..1c908fe61fc0b 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -21,7 +21,7 @@ use super::{ AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, BabeEpochConfiguration, Slot, BABE_ENGINE_ID, }; -use codec::{Codec, Decode, Encode}; +use codec::{Codec, Decode, Encode, MaxEncodedLen}; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; @@ -134,7 +134,9 @@ pub struct NextEpochDescriptor { /// Information about the next epoch config, if changed. This is broadcast in the first /// block of the epoch, and applies using the same rules as `NextEpochDescriptor`. -#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug, scale_info::TypeInfo)] +#[derive( + Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug, MaxEncodedLen, scale_info::TypeInfo, +)] pub enum NextConfigDescriptor { /// Version 1. 
#[codec(index = 1)] diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 4417670f4144b..560866cfb2ab5 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -28,7 +28,7 @@ pub use sp_consensus_vrf::schnorrkel::{ Randomness, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -214,7 +214,7 @@ pub struct BabeGenesisConfiguration { } /// Types of allowed slots. -#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum AllowedSlots { /// Only allow primary slots. @@ -247,7 +247,7 @@ impl sp_consensus::SlotData for BabeGenesisConfiguration { } /// Configuration data used by the BABE consensus engine. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BabeEpochConfiguration { /// A constant value that is used in the threshold calculation formula. 
diff --git a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index 5408fc86b7bd4..fd8b06ecf8abb 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -50,7 +50,7 @@ pub trait SelectChain: Sync + Send + Clone { &self, target_hash: ::Hash, _maybe_max_number: Option>, - ) -> Result::Hash>, Error> { - Ok(Some(target_hash)) + ) -> Result<::Hash, Error> { + Ok(target_hash) } } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 4764a0cac1b14..a9f3e904a2a85 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -597,6 +597,8 @@ ss58_address_format!( (77, "manta", "Manta Network, standard account (*25519).") CalamariAccount => (78, "calamari", "Manta Canary Network, standard account (*25519).") + Polkadex => + (88, "polkadex", "Polkadex Mainnet, standard account (*25519).") PolkaSmith => (98, "polkasmith", "PolkaSmith Canary Network, standard account (*25519).") PolkaFoundry => @@ -615,8 +617,12 @@ ss58_address_format!( (1284, "moonbeam", "Moonbeam, session key (*25519).") Moonriver => (1285, "moonriver", "Moonriver, session key (*25519).") + Automata => + (2349, "automata", "Automata mainnet standard account (*25519).") BasiliskAccount => (10041, "basilisk", "Basilisk standard account (*25519).") + ContextFree => + (11820, "contextfree", "Automata ContextFree standard account (*25519).") // Note: 16384 and above are reserved. 
); diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index d3a2b56705926..5a8c1c4af4f99 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -24,7 +24,6 @@ libsecp256k1 = { version = "0.6", optional = true } sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../state-machine" } sp-wasm-interface = { version = "4.0.0-dev", path = "../wasm-interface", default-features = false } sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } -sp-maybe-compressed-blob = { version = "4.0.0-dev", optional = true, path = "../maybe-compressed-blob" } sp-trie = { version = "4.0.0-dev", optional = true, path = "../trie" } sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } sp-tracing = { version = "4.0.0-dev", default-features = false, path = "../tracing" } @@ -48,7 +47,6 @@ std = [ "sp-runtime-interface/std", "sp-externalities", "sp-wasm-interface/std", - "sp-maybe-compressed-blob", "sp-tracing/std", "tracing/std", "tracing-core/std", diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 5faeb59c72db6..78e6f0c847952 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -73,6 +73,7 @@ mod batch_verifier; #[cfg(feature = "std")] use batch_verifier::BatchVerifier; +#[cfg(feature = "std")] const LOG_TARGET: &str = "runtime::io"; /// Error verifying ECDSA signature @@ -1481,21 +1482,17 @@ mod allocator_impl { #[panic_handler] #[no_mangle] pub fn panic(info: &core::panic::PanicInfo) -> ! { - unsafe { - let message = sp_std::alloc::format!("{}", info); - logging::log(LogLevel::Error, "runtime", message.as_bytes()); - core::arch::wasm32::unreachable(); - } + let message = sp_std::alloc::format!("{}", info); + logging::log(LogLevel::Error, "runtime", message.as_bytes()); + core::arch::wasm32::unreachable(); } /// A default OOM handler for WASM environment. 
#[cfg(all(not(feature = "disable_oom"), not(feature = "std")))] #[alloc_error_handler] pub fn oom(_: core::alloc::Layout) -> ! { - unsafe { - logging::log(LogLevel::Error, "runtime", b"Runtime memory exhausted. Aborting"); - core::arch::wasm32::unreachable(); - } + logging::log(LogLevel::Error, "runtime", b"Runtime memory exhausted. Aborting"); + core::arch::wasm32::unreachable(); } /// Type alias for Externalities implementation used in tests. diff --git a/primitives/maybe-compressed-blob/Cargo.toml b/primitives/maybe-compressed-blob/Cargo.toml index 8d47c89ea8ebe..6994ccf5486ba 100644 --- a/primitives/maybe-compressed-blob/Cargo.toml +++ b/primitives/maybe-compressed-blob/Cargo.toml @@ -11,4 +11,4 @@ documentation = "https://docs.rs/sp-maybe-compressed-blob" readme = "README.md" [dependencies] -zstd = { version = "0.6.0", default-features = false } +zstd = { version = "0.9.0", default-features = false } diff --git a/primitives/npos-elections/src/traits.rs b/primitives/npos-elections/src/traits.rs index 45b6fa368ae2a..597d7e648fd9b 100644 --- a/primitives/npos-elections/src/traits.rs +++ b/primitives/npos-elections/src/traits.rs @@ -10,8 +10,8 @@ // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. @@ -22,6 +22,7 @@ use crate::{ VoteWeight, }; use codec::Encode; +use scale_info::TypeInfo; use sp_arithmetic::{ traits::{Bounded, UniqueSaturatedInto}, PerThing, @@ -72,7 +73,8 @@ where + Copy + Clone + Bounded - + Encode; + + Encode + + TypeInfo; /// The target type. Needs to be an index (convert to usize). 
type TargetIndex: UniqueSaturatedInto @@ -82,7 +84,8 @@ where + Copy + Clone + Bounded - + Encode; + + Encode + + TypeInfo; /// The weight/accuracy type of each vote. type Accuracy: PerThing128; diff --git a/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr b/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr index c7ed1af3b1a03..44fb5a244e03d 100644 --- a/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr +++ b/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr @@ -4,4 +4,4 @@ error: `PassByEnum` only supports enums as input type. 3 | #[derive(PassByEnum)] | ^^^^^^^^^^ | - = note: this error originates in a derive macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PassByEnum` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr b/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr index f6c85ed2bba3e..633dc3bbe8bc4 100644 --- a/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr +++ b/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr @@ -4,4 +4,4 @@ error: `PassByEnum` only supports unit variants. 
3 | #[derive(PassByEnum)] | ^^^^^^^^^^ | - = note: this error originates in a derive macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PassByEnum` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr b/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr index 9afbce76f0c23..0ffee00210e79 100644 --- a/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr +++ b/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr @@ -4,4 +4,4 @@ error: Only newtype/one field structs are supported by `PassByInner`! 3 | #[derive(PassByInner)] | ^^^^^^^^^^^ | - = note: this error originates in a derive macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PassByInner` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 781f342d43c1e..fe9ba588adb87 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -18,7 +18,7 @@ //! Testing utilities. use crate::{ - codec::{Codec, Decode, Encode}, + codec::{Codec, Decode, Encode, MaxEncodedLen}, generic, scale_info::TypeInfo, traits::{ @@ -59,6 +59,7 @@ use std::{ Deserialize, PartialOrd, Ord, + MaxEncodedLen, TypeInfo, )] pub struct UintAuthorityId(pub u64); diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 65c063fde1696..6d79d740dc4e1 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1474,6 +1474,7 @@ macro_rules! impl_opaque_keys { #[macro_export] #[cfg(not(feature = "std"))] +#[doc(hidden)] macro_rules! 
impl_opaque_keys { { $( #[ $attr:meta ] )* diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 457bbac5d2640..e444ae223a742 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -27,7 +27,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = num-traits = { version = "0.2.8", default-features = false } rand = { version = "0.7.2", optional = true } sp-externalities = { version = "0.10.0-dev", path = "../externalities", default-features = false } -smallvec = "1.4.1" +smallvec = "1.7.0" sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } tracing = { version = "0.1.22", optional = true } @@ -35,7 +35,7 @@ tracing = { version = "0.1.22", optional = true } hex-literal = "0.3.1" sp-runtime = { version = "4.0.0-dev", path = "../runtime" } pretty_assertions = "0.6.1" -rand = { version = "0.7.2" } +rand = "0.7.2" [features] default = ["std"] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 1b1a732f8d0fc..7dcf92b06de06 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -21,9 +21,9 @@ use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, ChildStorageCollection, StorageCollection, StorageKey, StorageValue, UsageInfo, }; -use codec::{Decode, Encode}; +use codec::Encode; use hash_db::Hasher; -use sp_core::storage::{well_known_keys, ChildInfo, TrackedStorageKey}; +use sp_core::storage::{ChildInfo, TrackedStorageKey}; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; use sp_std::vec::Vec; @@ -330,7 +330,11 @@ impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for BackendRuntimeCode<'a, B, H> { fn fetch_runtime_code<'b>(&'b self) -> Option> { - self.backend.storage(well_known_keys::CODE).ok().flatten().map(Into::into) + self.backend + .storage(sp_core::storage::well_known_keys::CODE) + .ok() + 
.flatten() + .map(Into::into) } } @@ -348,17 +352,17 @@ where pub fn runtime_code(&self) -> Result { let hash = self .backend - .storage_hash(well_known_keys::CODE) + .storage_hash(sp_core::storage::well_known_keys::CODE) .ok() .flatten() .ok_or("`:code` hash not found")? .encode(); let heap_pages = self .backend - .storage(well_known_keys::HEAP_PAGES) + .storage(sp_core::storage::well_known_keys::HEAP_PAGES) .ok() .flatten() - .and_then(|d| Decode::decode(&mut &d[..]).ok()); + .and_then(|d| codec::Decode::decode(&mut &d[..]).ok()); Ok(RuntimeCode { code_fetcher: self, hash, heap_pages }) } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index c9693ca6a88c1..c20d8492fb1f3 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -17,17 +17,15 @@ //! Concrete externalities implementation. -use crate::{ - backend::Backend, overlayed_changes::OverlayedExtensions, IndexOperation, OverlayedChanges, - StorageKey, StorageValue, -}; +#[cfg(feature = "std")] +use crate::overlayed_changes::OverlayedExtensions; +use crate::{backend::Backend, IndexOperation, OverlayedChanges, StorageKey, StorageValue}; use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; -use sp_core::{ - hexdisplay::HexDisplay, - storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, -}; -use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; +#[cfg(feature = "std")] +use sp_core::hexdisplay::HexDisplay; +use sp_core::storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}; +use sp_externalities::{Extension, ExtensionStore, Externalities}; use sp_trie::{empty_child_trie_root, trie_types::Layout}; #[cfg(feature = "std")] @@ -37,7 +35,7 @@ use sp_std::{ any::{Any, TypeId}, boxed::Box, cmp::Ordering, - fmt, vec, + vec, vec::Vec, }; #[cfg(feature = "std")] @@ -72,8 +70,8 @@ pub enum Error { } #[cfg(feature = "std")] -impl fmt::Display for Error { - fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match *self { Error::Backend(ref e) => write!(f, "Storage backend error: {}", e), Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e), @@ -139,7 +137,7 @@ where storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, changes_trie_state: Option>, - extensions: Option<&'a mut Extensions>, + extensions: Option<&'a mut sp_externalities::Extensions>, ) -> Self { Self { overlay, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 05d2c6d20ccee..7bd0c645f3c00 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -55,11 +55,19 @@ pub use tracing::trace; #[cfg(not(feature = "std"))] #[macro_export] macro_rules! warn { - (target: $target:expr, $($arg:tt)+) => { - () + (target: $target:expr, $message:expr $( , $arg:ident )* $( , )?) => { + { + $( + let _ = &$arg; + )* + } }; - ($($arg:tt)+) => { - () + ($message:expr, $( $arg:expr, )*) => { + { + $( + let _ = &$arg; + )* + } }; } @@ -68,11 +76,12 @@ macro_rules! warn { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! debug { - (target: $target:expr, $($arg:tt)+) => { - () - }; - ($($arg:tt)+) => { - () + (target: $target:expr, $message:expr $( , $arg:ident )* $( , )?) 
=> { + { + $( + let _ = &$arg; + )* + } }; } diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index a0558e06a380e..cf7af1c9a6f3a 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -21,15 +21,7 @@ mod changeset; mod offchain; use self::changeset::OverlayedChangeSet; -use crate::{backend::Backend, stats::StateMachineStats}; -pub use offchain::OffchainOverlayedChanges; -use sp_std::{ - any::{Any, TypeId}, - boxed::Box, - vec::Vec, -}; - -use crate::{changes_trie::BlockNumber, DefaultError}; +use crate::{backend::Backend, changes_trie::BlockNumber, stats::StateMachineStats, DefaultError}; #[cfg(feature = "std")] use crate::{ changes_trie::{build_changes_trie, State as ChangesTrieState}, @@ -37,16 +29,23 @@ use crate::{ }; use codec::{Decode, Encode}; use hash_db::Hasher; +pub use offchain::OffchainOverlayedChanges; use sp_core::{ offchain::OffchainOverlayedChange, storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}, }; +#[cfg(feature = "std")] use sp_externalities::{Extension, Extensions}; #[cfg(not(feature = "std"))] -use sp_std::collections::btree_map::{BTreeMap as Map, Entry as MapEntry}; -use sp_std::collections::btree_set::BTreeSet; +use sp_std::collections::btree_map::BTreeMap as Map; +use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; #[cfg(feature = "std")] use std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; +#[cfg(feature = "std")] +use std::{ + any::{Any, TypeId}, + boxed::Box, +}; pub use self::changeset::{AlreadyInRuntime, NoOpenTransaction, NotInRuntime, OverlayedValue}; @@ -581,6 +580,8 @@ impl OverlayedChanges { self.changes_trie_root(backend, changes_trie_state, parent_hash, false, &mut cache) .map_err(|_| "Failed to generate changes trie transaction")?; } + #[cfg(not(feature = "std"))] + let _ = parent_hash; #[cfg(feature = "std")] let changes_trie_transaction = cache @@ 
-758,6 +759,7 @@ where /// An overlayed extension is either a mutable reference /// or an owned extension. +#[cfg(feature = "std")] pub enum OverlayedExtension<'a> { MutRef(&'a mut Box), Owned(Box), @@ -770,10 +772,12 @@ pub enum OverlayedExtension<'a> { /// as owned references. After the execution of a runtime function, we /// can safely drop this object while not having modified the original /// list. +#[cfg(feature = "std")] pub struct OverlayedExtensions<'a> { extensions: Map>, } +#[cfg(feature = "std")] impl<'a> OverlayedExtensions<'a> { /// Create a new instance of overalyed extensions from the given extensions. pub fn new(extensions: &'a mut Extensions) -> Self { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index ec1772ba8666f..23f66ee14d87e 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -159,7 +159,7 @@ where /// /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open /// transactions. - fn as_backend(&self) -> InMemoryBackend { + pub fn as_backend(&self) -> InMemoryBackend { let top: Vec<_> = self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); let mut transaction = vec![(None, top)]; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 557a098fbaf79..6c575f0d76bc7 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -24,7 +24,7 @@ use hash_db::{self, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::ChildInfo; -use sp_std::{boxed::Box, ops::Deref, vec::Vec}; +use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ empty_child_trie_root, read_child_trie_value, read_trie_value, trie_types::{Layout, TrieDB, TrieError}, @@ -37,8 +37,11 @@ use std::sync::Arc; #[cfg(not(feature = "std"))] macro_rules! 
format { - ($($arg:tt)+) => { - crate::DefaultError + ( $message:expr, $( $arg:expr )* ) => { + { + $( let _ = &$arg; )* + crate::DefaultError + } }; } @@ -488,7 +491,7 @@ impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Storage::::get(self.deref(), key, prefix) + Storage::::get(std::ops::Deref::deref(self), key, prefix) } } diff --git a/shell.nix b/shell.nix index 9a2d30400631f..a86af005383f7 100644 --- a/shell.nix +++ b/shell.nix @@ -5,7 +5,7 @@ let rev = "4a07484cf0e49047f82d83fd119acffbad3b235f"; }); nixpkgs = import { overlays = [ mozillaOverlay ]; }; - rust-nightly = with nixpkgs; ((rustChannelOf { date = "2021-07-06"; channel = "nightly"; }).rust.override { + rust-nightly = with nixpkgs; ((rustChannelOf { date = "2021-09-10"; channel = "nightly"; }).rust.override { extensions = [ "rust-src" ]; targets = [ "wasm32-unknown-unknown" ]; }); diff --git a/ss58-registry.json b/ss58-registry.json index 563cc248db9dd..62602d829f6b8 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -532,6 +532,15 @@ "standardAccount": "*25519", "website": "https://manta.network" }, + { + "prefix": 88, + "network": "polkadex", + "displayName": "Polkadex Mainnet", + "symbols": ["PDEX"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://polkadex.trade" + }, { "prefix": 98, "network": "polkasmith", @@ -631,6 +640,15 @@ "standardAccount": "secp256k1", "website": "https://moonbeam.network" }, + { + "prefix": 2349, + "network": "automata", + "displayName": "Automata Mainnet", + "symbols": ["ATA"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://ata.network" + }, { "prefix": 10041, "network": "basilisk", @@ -639,6 +657,15 @@ "decimals": [12], "standardAccount": "*25519", "website": "https://bsx.fi" + }, + { + "prefix": 11820, + "network": "contextfree", + "displayName": "Automata ContextFree", + "symbols": ["CTX"], + "decimals": [18], + "standardAccount": 
"*25519", + "website": "https://ata.network" } ] } diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index c5e0ba49fcf5b..938aeda36d319 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -67,7 +67,6 @@ where .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - #[allow(deprecated)] assert_eq!(blockchain.leaves().unwrap(), vec![a2.hash()]); // A2 -> A3 diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 0d880d508ef38..943c41c247f75 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -36,7 +36,11 @@ use sp_trie::{ use trie_db::{Trie, TrieMut}; use cfg_if::cfg_if; -use frame_support::{parameter_types, traits::KeyOwnerProofSystem, weights::RuntimeDbWeight}; +use frame_support::{ + parameter_types, + traits::{CrateVersion, KeyOwnerProofSystem}, + weights::RuntimeDbWeight, +}; use frame_system::limits::{BlockLength, BlockWeights}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_core::hash::H256; @@ -520,6 +524,35 @@ impl frame_support::traits::PalletInfo for Runtime { return Some("Babe") } + None + } + fn module_name() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some("system") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("pallet_timestamp") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("pallet_babe") + } + + None + } + fn crate_version() -> Option { + use frame_support::traits::PalletInfoAccess as _; + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some(system::Pallet::::crate_version()) + } + if type_id == sp_std::any::TypeId::of::>() { + return Some(pallet_timestamp::Pallet::::crate_version()) + } + if type_id == sp_std::any::TypeId::of::>() { + return Some(pallet_babe::Pallet::::crate_version()) + } + None } } @@ -574,6 +607,7 @@ impl pallet_timestamp::Config for Runtime { parameter_types! { pub const EpochDuration: u64 = 6; pub const ExpectedBlockTime: u64 = 10_000; + pub const MaxAuthorities: u32 = 10; } impl pallet_babe::Config for Runtime { @@ -596,8 +630,9 @@ impl pallet_babe::Config for Runtime { )>>::IdentificationTuple; type HandleEquivocation = (); - type WeightInfo = (); + + type MaxAuthorities = MaxAuthorities; } /// Adds one to the given input and returns the final result. diff --git a/utils/frame/benchmarking-cli/src/template.hbs b/utils/frame/benchmarking-cli/src/template.hbs index 4acb8c7baa23b..36abf27f59a6e 100644 --- a/utils/frame/benchmarking-cli/src/template.hbs +++ b/utils/frame/benchmarking-cli/src/template.hbs @@ -17,7 +17,7 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; -/// Weight functions for {{pallet}}. +/// Weight functions for `{{pallet}}`. 
pub struct WeightInfo(PhantomData); impl {{pallet}}::WeightInfo for WeightInfo { {{~#each benchmarks as |benchmark|}} diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml new file mode 100644 index 0000000000000..384307fbec9e5 --- /dev/null +++ b/utils/frame/generate-bags/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "generate-bags" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Bag threshold generation script for pallet-bag-list" +readme = "README.md" + +[dependencies] +# FRAME +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../../../frame/election-provider-support", features = ["runtime-benchmarks"] } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } +pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking" } + +# primitives +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" } + +# third party +chrono = { version = "0.4.19" } +git2 = { version = "0.13.20", default-features = false } +num-format = { version = "0.4.0" } +structopt = "0.3.21" diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml new file mode 100644 index 0000000000000..7fcd981a6bbd6 --- /dev/null +++ b/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "node-runtime-generate-bags" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Bag threshold generation script for pallet-bag-list and node-runtime." 
+readme = "README.md" + +[dependencies] +node-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runtime" } +generate-bags = { version = "3.0.0", path = "../" } + +# third-party +structopt = "0.3.21" diff --git a/utils/frame/generate-bags/node-runtime/src/main.rs b/utils/frame/generate-bags/node-runtime/src/main.rs new file mode 100644 index 0000000000000..5d36b381a7d0c --- /dev/null +++ b/utils/frame/generate-bags/node-runtime/src/main.rs @@ -0,0 +1,46 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Make the set of bag thresholds to be used with pallet-bags-list. + +use generate_bags::generate_thresholds; +use std::path::PathBuf; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +struct Opt { + /// How many bags to generate. + #[structopt(long, default_value = "200")] + n_bags: usize, + + /// Where to write the output. + output: PathBuf, + + /// The total issuance of the currency used to create `VoteWeight`. + #[structopt(short, long)] + total_issuance: u128, + + /// The minimum account balance (i.e. existential deposit) for the currency used to create + /// `VoteWeight`. 
+ #[structopt(short, long)] + minimum_balance: u128, +} + +fn main() -> Result<(), std::io::Error> { + let Opt { n_bags, output, total_issuance, minimum_balance } = Opt::from_args(); + generate_thresholds::(n_bags, &output, total_issuance, minimum_balance) +} diff --git a/utils/frame/generate-bags/src/lib.rs b/utils/frame/generate-bags/src/lib.rs new file mode 100644 index 0000000000000..af9df4435bcab --- /dev/null +++ b/utils/frame/generate-bags/src/lib.rs @@ -0,0 +1,246 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Support code to ease the process of generating bag thresholds. +//! +//! NOTE: this assume the runtime implements [`pallet_staking::Config`], as it requires an +//! implementation of the traits [`frame_support::traits::Currency`] and +//! [`frame_support::traits::CurrencyToVote`]. +//! +//! The process of adding bags to a runtime requires only four steps. +//! +//! 1. Update the runtime definition. +//! +//! ```ignore +//! parameter_types!{ +//! pub const BagThresholds: &'static [u64] = &[]; +//! } +//! +//! impl pallet_bags_list::Config for Runtime { +//! // +//! type BagThresholds = BagThresholds; +//! } +//! ``` +//! +//! 2. Write a little program to generate the definitions. This program exists only to hook together +//! the runtime definitions with the various calculations here. Take a look at +//! 
_utils/frame/generate_bags/node-runtime_ for an example. +//! +//! 3. Run that program: +//! +//! ```sh,notrust +//! $ cargo run -p node-runtime-generate-bags -- --total-issuance 1234 --minimum-balance 1 +//! output.rs ``` +//! +//! 4. Update the runtime definition. +//! +//! ```diff,notrust +//! + mod output; +//! - pub const BagThresholds: &'static [u64] = &[]; +//! + pub const BagThresholds: &'static [u64] = &output::THRESHOLDS; +//! ``` + +use frame_election_provider_support::VoteWeight; +use frame_support::traits::Get; +use std::{ + io::Write, + path::{Path, PathBuf}, +}; + +/// Compute the existential weight for the specified configuration. +/// +/// Note that this value depends on the current issuance, a quantity known to change over time. +/// This makes the project of computing a static value suitable for inclusion in a static, +/// generated file _excitingly unstable_. +fn existential_weight( + total_issuance: u128, + minimum_balance: u128, +) -> VoteWeight { + use frame_support::traits::CurrencyToVote; + use std::convert::TryInto; + + T::CurrencyToVote::to_vote( + minimum_balance + .try_into() + .map_err(|_| "failed to convert minimum_balance to type Balance") + .unwrap(), + total_issuance + .try_into() + .map_err(|_| "failed to convert total_issuance to type Balance") + .unwrap(), + ) +} + +/// Return the path to a header file used in this repository if is exists. +/// +/// Just searches the git working directory root for files matching certain patterns; it's +/// pretty naive. +fn path_to_header_file() -> Option { + let repo = git2::Repository::open_from_env().ok()?; + let workdir = repo.workdir()?; + for file_name in &["HEADER-APACHE2", "HEADER-GPL3", "HEADER", "file_header.txt"] { + let path = workdir.join(file_name); + if path.exists() { + return Some(path) + } + } + None +} + +/// Create an underscore formatter: a formatter which inserts `_` every 3 digits of a number. 
+fn underscore_formatter() -> num_format::CustomFormat { + num_format::CustomFormat::builder() + .grouping(num_format::Grouping::Standard) + .separator("_") + .build() + .expect("format described here meets all constraints") +} + +/// Compute the constant ratio for the thresholds. +/// +/// This ratio ensures that each bag, with the possible exceptions of certain small ones and the +/// final one, is a constant multiple of the previous, while fully occupying the `VoteWeight` +/// space. +pub fn constant_ratio(existential_weight: VoteWeight, n_bags: usize) -> f64 { + ((VoteWeight::MAX as f64 / existential_weight as f64).ln() / ((n_bags - 1) as f64)).exp() +} + +/// Compute the list of bag thresholds. +/// +/// Returns a list of exactly `n_bags` elements, except in the case of overflow. +/// The first element is always `existential_weight`. +/// The last element is always `VoteWeight::MAX`. +/// +/// All other elements are computed from the previous according to the formula +/// `threshold[k + 1] = (threshold[k] * ratio).max(threshold[k] + 1); +pub fn thresholds( + existential_weight: VoteWeight, + constant_ratio: f64, + n_bags: usize, +) -> Vec { + const WEIGHT_LIMIT: f64 = VoteWeight::MAX as f64; + + let mut thresholds = Vec::with_capacity(n_bags); + + if n_bags > 1 { + thresholds.push(existential_weight); + } + + while n_bags > 0 && thresholds.len() < n_bags - 1 { + let last = thresholds.last().copied().unwrap_or(existential_weight); + let successor = (last as f64 * constant_ratio).round().max(last as f64 + 1.0); + if successor < WEIGHT_LIMIT { + thresholds.push(successor as VoteWeight); + } else { + eprintln!("unexpectedly exceeded weight limit; breaking threshold generation loop"); + break + } + } + + thresholds.push(VoteWeight::MAX); + + debug_assert_eq!(thresholds.len(), n_bags); + debug_assert!(n_bags == 0 || thresholds[0] == existential_weight); + debug_assert!(n_bags == 0 || thresholds[thresholds.len() - 1] == VoteWeight::MAX); + + thresholds +} + +/// 
Write a thresholds module to the path specified. +/// +/// Parameters: +/// - `n_bags` the number of bags to generate. +/// - `output` the path to write to; should terminate with a Rust module name, i.e. +/// `foo/bar/thresholds.rs`. +/// - `total_issuance` the total amount of the currency in the network. +/// - `minimum_balance` the minimum balance of the currency required for an account to exist (i.e. +/// existential deposit). +/// +/// This generated module contains, in order: +/// +/// - The contents of the header file in this repository's root, if found. +/// - Module documentation noting that this is autogenerated and when. +/// - Some associated constants. +/// - The constant array of thresholds. +pub fn generate_thresholds( + n_bags: usize, + output: &Path, + total_issuance: u128, + minimum_balance: u128, +) -> Result<(), std::io::Error> { + // ensure the file is accessable + if let Some(parent) = output.parent() { + if !parent.exists() { + std::fs::create_dir_all(parent)?; + } + } + + // copy the header file + if let Some(header_path) = path_to_header_file() { + std::fs::copy(header_path, output)?; + } + + // open an append buffer + let file = std::fs::OpenOptions::new().create(true).append(true).open(output)?; + let mut buf = std::io::BufWriter::new(file); + + // create underscore formatter and format buffer + let mut num_buf = num_format::Buffer::new(); + let format = underscore_formatter(); + + // module docs + let now = chrono::Utc::now(); + writeln!(buf)?; + writeln!(buf, "//! Autogenerated bag thresholds.")?; + writeln!(buf, "//!")?; + writeln!(buf, "//! Generated on {}", now.to_rfc3339())?; + writeln!( + buf, + "//! 
for the {} runtime.", + ::Version::get().spec_name, + )?; + + let existential_weight = existential_weight::(total_issuance, minimum_balance); + num_buf.write_formatted(&existential_weight, &format); + writeln!(buf)?; + writeln!(buf, "/// Existential weight for this runtime.")?; + writeln!(buf, "#[cfg(any(test, feature = \"std\"))]")?; + writeln!(buf, "#[allow(unused)]")?; + writeln!(buf, "pub const EXISTENTIAL_WEIGHT: u64 = {};", num_buf.as_str())?; + + // constant ratio + let constant_ratio = constant_ratio(existential_weight, n_bags); + writeln!(buf)?; + writeln!(buf, "/// Constant ratio between bags for this runtime.")?; + writeln!(buf, "#[cfg(any(test, feature = \"std\"))]")?; + writeln!(buf, "#[allow(unused)]")?; + writeln!(buf, "pub const CONSTANT_RATIO: f64 = {:.16};", constant_ratio)?; + + // thresholds + let thresholds = thresholds(existential_weight, constant_ratio, n_bags); + writeln!(buf)?; + writeln!(buf, "/// Upper thresholds delimiting the bag list.")?; + writeln!(buf, "pub const THRESHOLDS: [u64; {}] = [", thresholds.len())?; + for threshold in thresholds { + num_buf.write_formatted(&threshold, &format); + // u64::MAX, with spacers every 3 digits, is 26 characters wide + writeln!(buf, " {:>26},", num_buf.as_str())?; + } + writeln!(buf, "];")?; + + Ok(()) +} diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index d255499d6c3ad..ce774679f94c2 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = [ "tokio1", -] } +]} jsonrpsee-proc-macros = "0.3.0" env_logger = "0.9" diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index addb3d1dd3c17..2052780286c66 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ 
b/utils/frame/remote-externalities/src/lib.rs @@ -112,8 +112,8 @@ pub struct OnlineConfig { pub at: Option, /// An optional state snapshot file to WRITE to, not for reading. Not written if set to `None`. pub state_snapshot: Option, - /// The modules to scrape. If empty, entire chain state will be scraped. - pub modules: Vec, + /// The pallets to scrape. If empty, entire chain state will be scraped. + pub pallets: Vec, /// Transport config. pub transport: Transport, } @@ -134,7 +134,7 @@ impl Default for OnlineConfig { transport: Transport { uri: DEFAULT_TARGET.to_owned(), client: None }, at: None, state_snapshot: None, - modules: vec![], + pallets: vec![], } } } @@ -360,9 +360,9 @@ impl Builder { .clone(); info!(target: LOG_TARGET, "scraping key-pairs from remote @ {:?}", at); - let mut keys_and_values = if config.modules.len() > 0 { + let mut keys_and_values = if config.pallets.len() > 0 { let mut filtered_kv = vec![]; - for f in config.modules.iter() { + for f in config.pallets.iter() { let hashed_prefix = StorageKey(twox_128(f.as_bytes()).to_vec()); let module_kv = self.rpc_get_pairs_paged(hashed_prefix.clone(), at).await?; info!( @@ -376,7 +376,7 @@ impl Builder { } filtered_kv } else { - info!(target: LOG_TARGET, "downloading data for all modules."); + info!(target: LOG_TARGET, "downloading data for all pallets."); self.rpc_get_pairs_paged(StorageKey(vec![]), at).await? }; @@ -482,12 +482,23 @@ impl Builder { self } + /// overwrite the `at` value, if `mode` is set to [`Mode::Online`]. + /// + /// noop if `mode` is [`Mode::Offline`] + pub fn overwrite_online_at(mut self, at: B::Hash) -> Self { + if let Mode::Online(mut online) = self.mode.clone() { + online.at = Some(at); + self.mode = Mode::Online(online); + } + self + } + /// Build the test externalities. 
pub async fn build(self) -> Result { let kv = self.pre_build().await?; let mut ext = TestExternalities::new_empty(); - debug!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); + info!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); for (k, v) in kv { let (k, v) = (k.0, v.0); // Insert the key,value pair into the test trie backend @@ -541,7 +552,7 @@ mod remote_tests { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - modules: vec!["System".to_owned()], + pallets: vec!["System".to_owned()], ..Default::default() })) .build() @@ -555,7 +566,7 @@ mod remote_tests { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - modules: vec![ + pallets: vec![ "Proxy".to_owned(), "Multisig".to_owned(), "PhragmenElection".to_owned(), @@ -583,7 +594,7 @@ mod remote_tests { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - modules: vec!["PhragmenElection".to_owned()], + pallets: vec!["PhragmenElection".to_owned()], ..Default::default() })) .build() @@ -609,7 +620,7 @@ mod remote_tests { Builder::::new() .mode(Mode::Online(OnlineConfig { state_snapshot: Some(SnapshotConfig::new("test_snapshot_to_remove.bin")), - modules: vec!["Balances".to_owned()], + pallets: vec!["Balances".to_owned()], ..Default::default() })) .build() diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 5cc5ae6ee58bb..11b899db4ca47 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -25,6 +25,12 @@ sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" sp-state-machine = { version = "0.10.0-dev", path = "../../../../primitives/state-machine" } sp-runtime = { version = "4.0.0-dev", path = "../../../../primitives/runtime" } sp-core = { version = "4.0.0-dev", path = "../../../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../../../primitives/io" } sp-keystore = { version = "0.10.0-dev", path = 
"../../../../primitives/keystore" } +sp-externalities = { version = "0.10.0-dev", path = "../../../../primitives/externalities" } +sp-version = { version = "4.0.0-dev", path = "../../../../primitives/version" } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } +jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = [ + "tokio1", +]} diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs new file mode 100644 index 0000000000000..19422db90119f --- /dev/null +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -0,0 +1,182 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + build_executor, ensure_matching_spec, extract_code, full_extensions, hash_of, local_spec, + state_machine_call, SharedParams, State, LOG_TARGET, +}; +use remote_externalities::rpc_api; +use sc_service::{Configuration, NativeExecutionDispatch}; +use sp_core::storage::well_known_keys; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use std::{fmt::Debug, str::FromStr}; + +/// Configurations of the [`Command::ExecuteBlock`]. +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct ExecuteBlockCmd { + /// Overwrite the wasm code in state or not. 
+ #[structopt(long)] + overwrite_wasm_code: bool, + + /// If set, then the state root check is disabled by the virtue of calling into + /// `TryRuntime_execute_block_no_check` instead of + /// `Core_execute_block`. + #[structopt(long)] + no_check: bool, + + /// The block hash at which to fetch the block. + /// + /// If the `live` state type is being used, then this can be omitted, and is equal to whatever + /// the `state::at` is. Only use this (with care) when combined with a snapshot. + #[structopt( + long, + multiple = false, + parse(try_from_str = crate::parse::hash) + )] + block_at: Option, + + /// The ws uri from which to fetch the block. + /// + /// If the `live` state type is being used, then this can be omitted, and is equal to whatever + /// the `state::uri` is. Only use this (with care) when combined with a snapshot. + #[structopt( + long, + multiple = false, + parse(try_from_str = crate::parse::url) + )] + block_ws_uri: Option, + + /// The state type to use. + /// + /// For this command only, if the `live` is used, then state of the parent block is fetched. + /// + /// If `block_at` is provided, then the [`State::Live::at`] is being ignored. + #[structopt(subcommand)] + state: State, +} + +impl ExecuteBlockCmd { + fn block_at(&self) -> sc_cli::Result + where + Block::Hash: FromStr, + ::Err: Debug, + { + match (&self.block_at, &self.state) { + (Some(block_at), State::Snap { .. }) => hash_of::(&block_at), + (Some(block_at), State::Live { .. }) => { + log::warn!(target: LOG_TARGET, "--block-at is provided while state type is live. the `Live::at` will be ignored"); + hash_of::(&block_at) + }, + (None, State::Live { at: Some(at), .. }) => hash_of::(&at), + _ => { + panic!("either `--block-at` must be provided, or state must be `live with a proper `--at``"); + }, + } + } + + fn block_ws_uri(&self) -> String + where + Block::Hash: FromStr, + ::Err: Debug, + { + match (&self.block_ws_uri, &self.state) { + (Some(block_ws_uri), State::Snap { .. 
}) => block_ws_uri.to_owned(), + (Some(block_ws_uri), State::Live { .. }) => { + log::error!(target: LOG_TARGET, "--block-uri is provided while state type is live, Are you sure you know what you are doing?"); + block_ws_uri.to_owned() + }, + (None, State::Live { uri, .. }) => uri.clone(), + (None, State::Snap { .. }) => { + panic!("either `--block-uri` must be provided, or state must be `live`"); + }, + } + } +} + +pub(crate) async fn execute_block( + shared: SharedParams, + command: ExecuteBlockCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let executor = build_executor::(&shared, &config); + let execution = shared.execution; + + let block_at = command.block_at::()?; + let block_ws_uri = command.block_ws_uri::(); + let block: Block = rpc_api::get_block::(block_ws_uri.clone(), block_at).await?; + let parent_hash = block.header().parent_hash(); + log::info!( + target: LOG_TARGET, + "fetched block from {:?}, parent_hash to fetch the state {:?}", + block_ws_uri, + parent_hash + ); + + let ext = { + let builder = command + .state + .builder::()? + // make sure the state is being build with the parent hash, if it is online. + .overwrite_online_at(parent_hash.to_owned()); + + let builder = if command.overwrite_wasm_code { + let (code_key, code) = extract_code(&config.chain_spec)?; + builder.inject_key_value(&[(code_key, code)]) + } else { + builder.inject_hashed_key(well_known_keys::CODE) + }; + + builder.build().await? + }; + + // A digest item gets added when the runtime is processing the block, so we need to pop + // the last one to be consistent with what a gossiped block would contain. 
+ let (mut header, extrinsics) = block.deconstruct(); + header.digest_mut().pop(); + let block = Block::new(header, extrinsics); + + let (expected_spec_name, expected_spec_version) = + local_spec::(&ext, &executor); + ensure_matching_spec::( + block_ws_uri.clone(), + expected_spec_name, + expected_spec_version, + shared.no_spec_name_check, + ) + .await; + + let _ = state_machine_call::( + &ext, + &executor, + execution, + if command.no_check { "TryRuntime_execute_block_no_check" } else { "Core_execute_block" }, + block.encode().as_ref(), + full_extensions(), + )?; + + log::info!(target: LOG_TARGET, "Core_execute_block executed without errors."); + + Ok(()) +} diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs new file mode 100644 index 0000000000000..0526f5d327fb2 --- /dev/null +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{ + build_executor, ensure_matching_spec, extract_code, full_extensions, local_spec, parse, + state_machine_call, SharedParams, LOG_TARGET, +}; +use jsonrpsee_ws_client::{ + types::{traits::SubscriptionClient, v2::params::JsonRpcParams, Subscription}, + WsClientBuilder, +}; +use parity_scale_codec::Decode; +use remote_externalities::{rpc_api, Builder, Mode, OnlineConfig}; +use sc_executor::NativeExecutionDispatch; +use sc_service::Configuration; +use sp_core::H256; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use std::{fmt::Debug, str::FromStr}; + +const SUB: &'static str = "chain_subscribeFinalizedHeads"; +const UN_SUB: &'static str = "chain_unsubscribeFinalizedHeads"; + +/// Configurations of the [`Command::FollowChain`]. +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct FollowChainCmd { + /// The url to connect to. + #[structopt( + short, + long, + parse(try_from_str = parse::url), + )] + uri: String, +} + +pub(crate) async fn follow_chain( + shared: SharedParams, + command: FollowChainCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + Block::Header: serde::de::DeserializeOwned, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let mut maybe_state_ext = None; + + let client = WsClientBuilder::default() + .connection_timeout(std::time::Duration::new(20, 0)) + .max_notifs_per_subscription(1024) + .max_request_body_size(u32::MAX) + .build(&command.uri) + .await + .unwrap(); + + log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB); + let mut subscription: Subscription = + client.subscribe(&SUB, JsonRpcParams::NoParams, &UN_SUB).await.unwrap(); + + let (code_key, code) = extract_code(&config.chain_spec)?; + let executor = build_executor::(&shared, &config); + let execution = shared.execution; + + loop { + let header = match subscription.next().await { + 
Ok(Some(header)) => header, + Ok(None) => { + log::warn!("subscription returned `None`. Probably decoding has failed."); + break + }, + Err(why) => { + log::warn!("subscription returned error: {:?}.", why); + continue + }, + }; + + let hash = header.hash(); + let number = header.number(); + + let block = rpc_api::get_block::(&command.uri, hash).await.unwrap(); + + log::debug!( + target: LOG_TARGET, + "new block event: {:?} => {:?}, extrinsics: {}", + hash, + number, + block.extrinsics().len() + ); + + // create an ext at the state of this block, whatever is the first subscription event. + if maybe_state_ext.is_none() { + let builder = Builder::::new().mode(Mode::Online(OnlineConfig { + transport: command.uri.clone().into(), + at: Some(header.parent_hash().clone()), + ..Default::default() + })); + + let new_ext = + builder.inject_key_value(&[(code_key.clone(), code.clone())]).build().await?; + log::info!( + target: LOG_TARGET, + "initialized state externalities at {:?}, storage root {:?}", + number, + new_ext.as_backend().root() + ); + + let (expected_spec_name, expected_spec_version) = + local_spec::(&new_ext, &executor); + ensure_matching_spec::( + command.uri.clone(), + expected_spec_name, + expected_spec_version, + shared.no_spec_name_check, + ) + .await; + + maybe_state_ext = Some(new_ext); + } + + let state_ext = + maybe_state_ext.as_mut().expect("state_ext either existed or was just created"); + + let (mut changes, encoded_result) = state_machine_call::( + &state_ext, + &executor, + execution, + "TryRuntime_execute_block_no_check", + block.encode().as_ref(), + full_extensions(), + )?; + + let consumed_weight = ::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode output: {:?}", e))?; + + let storage_changes = changes + .drain_storage_changes::<_, _, NumberFor>( + &state_ext.backend, + None, + Default::default(), + &mut Default::default(), + ) + .unwrap(); + state_ext.backend.apply_transaction( + storage_changes.transaction_storage_root, + 
storage_changes.transaction, + ); + + log::info!( + target: LOG_TARGET, + "executed block {}, consumed weight {}, new storage root {:?}", + number, + consumed_weight, + state_ext.as_backend().root(), + ); + } + + log::error!(target: LOG_TARGET, "ws subscription must have terminated."); + Ok(()) +} diff --git a/utils/frame/try-runtime/cli/src/commands/mod.rs b/utils/frame/try-runtime/cli/src/commands/mod.rs new file mode 100644 index 0000000000000..bfd8290fb31c1 --- /dev/null +++ b/utils/frame/try-runtime/cli/src/commands/mod.rs @@ -0,0 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub(crate) mod execute_block; +pub(crate) mod follow_chain; +pub(crate) mod offchain_worker; +pub(crate) mod on_runtime_upgrade; diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs new file mode 100644 index 0000000000000..6f37e4b3849fa --- /dev/null +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -0,0 +1,165 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + build_executor, ensure_matching_spec, extract_code, full_extensions, hash_of, local_spec, + parse, state_machine_call, SharedParams, State, LOG_TARGET, +}; +use parity_scale_codec::Encode; +use remote_externalities::rpc_api; +use sc_executor::NativeExecutionDispatch; +use sc_service::Configuration; +use sp_core::storage::well_known_keys; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use std::{fmt::Debug, str::FromStr}; + +/// Configurations of the [`Command::OffchainWorker`]. +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct OffchainWorkerCmd { + /// Overwrite the wasm code in state or not. + #[structopt(long)] + overwrite_wasm_code: bool, + + /// The block hash at which to fetch the header. + /// + /// If the `live` state type is being used, then this can be omitted, and is equal to whatever + /// the `state::at` is. Only use this (with care) when combined with a snapshot. + #[structopt( + long, + multiple = false, + parse(try_from_str = parse::hash) + )] + header_at: Option, + + /// The ws uri from which to fetch the header. + /// + /// If the `live` state type is being used, then this can be omitted, and is equal to whatever + /// the `state::uri` is. Only use this (with care) when combined with a snapshot. + #[structopt( + long, + multiple = false, + parse(try_from_str = parse::url) + )] + header_ws_uri: Option, + + /// The state type to use. 
+ #[structopt(subcommand)] + pub state: State, +} + +impl OffchainWorkerCmd { + fn header_at(&self) -> sc_cli::Result + where + Block::Hash: FromStr, + ::Err: Debug, + { + match (&self.header_at, &self.state) { + (Some(header_at), State::Snap { .. }) => hash_of::(&header_at), + (Some(header_at), State::Live { .. }) => { + log::error!(target: LOG_TARGET, "--header-at is provided while state type is live, this will most likely lead to a nonsensical result."); + hash_of::(&header_at) + }, + (None, State::Live { at: Some(at), .. }) => hash_of::(&at), + _ => { + panic!("either `--header-at` must be provided, or state must be `live` with a proper `--at`"); + }, + } + } + + fn header_ws_uri(&self) -> String + where + Block::Hash: FromStr, + ::Err: Debug, + { + match (&self.header_ws_uri, &self.state) { + (Some(header_ws_uri), State::Snap { .. }) => header_ws_uri.to_owned(), + (Some(header_ws_uri), State::Live { .. }) => { + log::error!(target: LOG_TARGET, "--header-uri is provided while state type is live, this will most likely lead to a nonsensical result."); + header_ws_uri.to_owned() + }, + (None, State::Live { uri, .. }) => uri.clone(), + (None, State::Snap { .. 
}) => { + panic!("either `--header-uri` must be provided, or state must be `live`"); + }, + } + } +} + +pub(crate) async fn offchain_worker( + shared: SharedParams, + command: OffchainWorkerCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + Block::Header: serde::de::DeserializeOwned, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let executor = build_executor(&shared, &config); + let execution = shared.execution; + + let header_at = command.header_at::()?; + let header_ws_uri = command.header_ws_uri::(); + + let header = rpc_api::get_header::(header_ws_uri.clone(), header_at).await?; + log::info!( + target: LOG_TARGET, + "fetched header from {:?}, block number: {:?}", + header_ws_uri, + header.number() + ); + + let ext = { + let builder = command.state.builder::()?; + + let builder = if command.overwrite_wasm_code { + let (code_key, code) = extract_code(&config.chain_spec)?; + builder.inject_key_value(&[(code_key, code)]) + } else { + builder.inject_hashed_key(well_known_keys::CODE) + }; + + builder.build().await? 
+ }; + + let (expected_spec_name, expected_spec_version) = + local_spec::(&ext, &executor); + ensure_matching_spec::( + header_ws_uri, + expected_spec_name, + expected_spec_version, + shared.no_spec_name_check, + ) + .await; + + let _ = state_machine_call::( + &ext, + &executor, + execution, + "OffchainWorkerApi_offchain_worker", + header.encode().as_ref(), + full_extensions(), + )?; + + log::info!(target: LOG_TARGET, "OffchainWorkerApi_offchain_worker executed without errors."); + + Ok(()) +} diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs new file mode 100644 index 0000000000000..86f5548b8aafa --- /dev/null +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{fmt::Debug, str::FromStr}; + +use parity_scale_codec::Decode; +use sc_executor::NativeExecutionDispatch; +use sc_service::Configuration; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +use crate::{ + build_executor, ensure_matching_spec, extract_code, local_spec, state_machine_call, + SharedParams, State, LOG_TARGET, +}; + +/// Configurations of the [`Command::OnRuntimeUpgrade`]. 
+#[derive(Debug, Clone, structopt::StructOpt)] +pub struct OnRuntimeUpgradeCmd { + /// The state type to use. + #[structopt(subcommand)] + pub state: State, +} + +pub(crate) async fn on_runtime_upgrade( + shared: SharedParams, + command: OnRuntimeUpgradeCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let executor = build_executor(&shared, &config); + let execution = shared.execution; + + let ext = { + let builder = command.state.builder::()?; + let (code_key, code) = extract_code(&config.chain_spec)?; + builder.inject_key_value(&[(code_key, code)]).build().await? + }; + + if let Some(uri) = command.state.live_uri() { + let (expected_spec_name, expected_spec_version) = + local_spec::(&ext, &executor); + ensure_matching_spec::( + uri, + expected_spec_name, + expected_spec_version, + shared.no_spec_name_check, + ) + .await; + } + + let (_, encoded_result) = state_machine_call::( + &ext, + &executor, + execution, + "TryRuntime_on_runtime_upgrade", + &[], + Default::default(), // we don't really need any extensions here. + )?; + + let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode output: {:?}", e))?; + log::info!( + target: LOG_TARGET, + "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = {}, total weight = {} ({})", + weight, + total_weight, + weight as f64 / total_weight.max(1) as f64 + ); + + Ok(()) +} diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index c92c3959535e9..d5ccca9560252 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -15,67 +15,369 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! 
`Structopt`-ready structs for `try-runtime`. - -use parity_scale_codec::{Decode, Encode}; -use remote_externalities::{rpc_api, Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig}; +//! # Try-runtime +//! +//! Substrate's ultimate testing framework for the power users. +//! +//! > As the name suggests, `try-runtime` is a detailed testing framework that gives you a lot of +//! control over what is being executed in which environment. It is recommended that user's first +//! familiarize themselves with substrate in depth, particularly the execution model. It is critical +//! to deeply understand how the wasm/native interactions, and the runtime apis work in the +//! substrate runtime, before commencing to working with `try-runtime`. +//! +//! #### Resources +//! +//! Some resources about the above: +//! +//! 1. +//! 2. +//! 3. +//! +//! --- +//! +//! ## Overview +//! +//! The basis of all try-runtime commands is the same: connect to a live node, scrape its *state* +//! and put it inside a `TestExternalities`, then call into a *specific runtime-api* using the given +//! state and some *runtime*. +//! +//! All of the variables in the above statement are made *italic*. Let's look at each of them: +//! +//! 1. **State** is the key-value pairs of data that comprise the canonical information that any +//! blockchain is keeping. A state can be full (all key-value pairs), or be partial (only pairs +//! related to some pallets). Moreover, some keys are special and are not related to specific +//! pallets, known as [`well_known_keys`] in substrate. The most important of these is the +//! `:CODE:` key, which contains the code used for execution, when wasm execution is chosen. +//! +//! 2. *A runtime-api* call is a call into a function defined in the runtime, *on top of a given +//! state*. Each subcommand of `try-runtime` utilizes a specific *runtime-api*. +//! +//! 3. Finally, the **runtime** is the actual code that is used to execute the aforementioned +//! 
runtime-api. All substrate based chains always have two runtimes: native and wasm. The +//! decision of which one is chosen is non-trivial. First, let's look at the options: +//! +//! 1. Native: this means that the runtime that is **in your codebase**, aka whatever you see in +//! your editor, is being used. This runtime is easier for diagnostics. We refer to this as +//! the "local runtime". +//! +//! 2. Wasm: this means that whatever is stored in the `:CODE:` key of the state that your +//! scrape is being used. In plain sight, since the entire state (including `:CODE:`) is +//! scraped from a remote chain, you could conclude that the wasm runtime, if used, is always +//! equal to the canonical runtime of the live chain (i.e. NOT the "local runtime"). That's +//! factually true, but then the testing would be quite lame. Typically, with try-runtime, +//! you don't want to execute whatever code is already on the live chain. Instead, you want +//! your local runtime (which typically includes a non-released feature) to be used. This is +//! why try-runtime overwrites the wasm runtime (at `:CODE:`) with the local runtime as well. +//! That being said, this behavior can be controlled in certain subcommands with a special +//! flag (`--overwrite-wasm-code`). +//! +//! The decision of which runtime is eventually used is based on two facts: +//! +//! 1. `--execution` flag. If you specify `wasm`, then it is *always* wasm. If it is `native`, then +//! if and ONLY IF the spec versions match, then the native runtime is used. Else, wasm runtime +//! is used again. +//! 2. `--chain` flag (if present in your cli), which determines *which local runtime*, is selected. +//! This will specify: +//! 1. which native runtime is used, if you select `--execution Native` +//! 2. which wasm runtime is used to replace the `:CODE:`, if try-runtime is instructed to do +//! so. +//! +//! All in all, if the term "local runtime" is used in the rest of this crate's documentation, it +//! 
means either the native runtime, or the wasm runtime when overwritten inside `:CODE:`. In other +//! words, it means your... well, "local runtime", regardless of wasm or native. +//! +//! //! See [`Command`] for more information about each command's specific customization flags, and +//! assumptions regarding the runtime being used. +//! +//! Finally, To make sure there are no errors regarding this, always run any `try-runtime` command +//! with `executor=trace` logging targets, which will specify which runtime is being used per api +//! call. +//! +//! Furthermore, other relevant log targets are: `try-runtime::cli`, `remote-ext`, and `runtime`. +//! +//! ## Spec name check +//! +//! A common pitfall is that you might be running some test on top of the state of chain `x`, with +//! the runtime of chain `y`. To avoid this all commands do a spec-name check before executing +//! anything by default. This will check the spec name of the remote node your are connected to, +//! with the spec name of your local runtime and ensure that they match. +//! +//! Should you need to disable this on certain occasions, a top level flag of `--no-spec-name-check` +//! can be used. +//! +//! The spec version is also always inspected, but if it is a mismatch, it will only emit a warning. +//! +//! ## Note nodes that operate with `try-runtime` +//! +//! There are a number of flags that need to be preferably set on a running node in order to work +//! well with try-runtime's expensive RPC queries: +//! +//! - set `--rpc-max-payload 1000` to ensure large RPC queries can work. +//! - set `--rpc-cors all` to ensure ws connections can come through. +//! +//! Note that *none* of the try-runtime operations need unsafe RPCs. +//! +//! ## Migration Best Practices +//! +//! One of the main use-cases of try-runtime is using it for testing storage migrations. The +//! following points makes sure you can *effectively* test your migrations with try-runtime. +//! +//! #### Adding pre/post hooks +//! 
+//! One of the gems that come only in the `try-runtime` feature flag is the `pre_upgrade` and +//! `post_upgrade` hooks for [`OnRuntimeUpgrade`]. This trait is implemented either inside the +//! pallet, or manually in a runtime, to define a migration. In both cases, these functions can be +//! added, given the right flag: +//! +//! ```ignore +//! #[cfg(feature = try-runtime)] +//! fn pre_upgrade() -> Result<(), &'static str> {} +//! +//! #[cfg(feature = try-runtime)] +//! fn post_upgrade() -> Result<(), &'static str> {} +//! ``` +//! +//! (The pallet macro syntax will support this simply as a part of `#[pallet::hooks]`). +//! +//! These hooks allow you to execute some code, only within the `on-runtime-upgrade` command, before +//! and after the migration. If any data needs to be temporarily stored between the pre/post +//! migration hooks, [`OnRuntimeUpgradeHelpersExt`] can help with that. +//! +//! #### Logging +//! +//! It is super helpful to make sure your migration code uses logging (always with a `runtime` log +//! target prefix, e.g. `runtime::balance`) and state exactly at which stage it is, and what it is +//! doing. +//! +//! #### Guarding migrations +//! +//! Always make sure that any migration code is guarded either by [`StorageVersion`], or by some +//! custom storage item, so that it is NEVER executed twice, even if the code lives in two +//! consecutive runtimes. +//! +//! ## Examples +//! +//! Run the migrations of the local runtime on the state of polkadot, from the polkadot repo where +//! we have `--chain polkadot-dev`, on the latest finalized block's state +//! +//! ```ignore +//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ +//! cargo run try-runtime \ +//! --execution Native \ +//! --chain polkadot-dev \ +//! on-runtime-upgrade \ +//! live \ +//! --uri wss://rpc.polkadot.io +//! # note that we don't pass any --at, nothing means latest block. +//! ``` +//! +//! 
Same as previous one, but let's say we want to run this command from the substrate repo, where +//! we don't have a matching spec name/version. +//! +//! ```ignore +//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ +//! cargo run try-runtime \ +//! --execution Native \ +//! --chain dev \ +//! --no-spec-name-check \ # mind this one! +//! on-runtime-upgrade \ +//! live \ +//! --uri wss://rpc.polkadot.io +//! ``` +//! +//! Same as the previous one, but run it at specific block number's state. This means that this +//! block hash's state shall not yet have been pruned in `rpc.polkadot.io`. +//! +//! ```ignore +//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ +//! cargo run try-runtime \ +//! --execution Native \ +//! --chain dev \ +//! --no-spec-name-check \ # mind this one! on-runtime-upgrade \ +//! on-runtime-upgrade \ +//! live \ +//! --uri wss://rpc.polkadot.io \ +//! --at +//! ``` +//! +//! Moving to `execute-block` and `offchain-workers`. For these commands, you always needs to +//! specify a block hash. For the rest of these examples, we assume we're in the polkadot repo. +//! +//! First, let's assume you are in a branch that has the same spec name/version as the live polkadot +//! network. +//! +//! ```ignore +//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ +//! cargo run try-runtime \ +//! --execution Wasm \ +//! --chain polkadot-dev \ +//! --uri wss://rpc.polkadot.io \ +//! execute-block \ +//! live \ +//! --at +//! ``` +//! +//! This is wasm, so it will technically execute the code that lives on the live network. Let's say +//! you want to execute your local runtime. Since you have a matching spec versions, you can simply +//! change `--execution Wasm` to `--execution Native` to achieve this. Your logs of `executor=trace` +//! should show something among the lines of: +//! +//! ```ignore +//! 
Request for native execution succeeded (native: polkadot-9900 (parity-polkadot-0.tx7.au0), chain: polkadot-9900 (parity-polkadot-0.tx7.au0)) +//! ``` +//! +//! If you don't have matching spec versions, then are doomed to execute wasm. In this case, you can +//! manually overwrite the wasm code with your local runtime: +//! +//! ```ignore +//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ +//! cargo run try-runtime \ +//! --execution Wasm \ +//! --chain polkadot-dev \ +//! execute-block \ +//! live \ +//! --uri wss://rpc.polkadot.io \ +//! --at \ +//! --overwrite-wasm-code +//! ``` +//! +//! For all of these blocks, the block with hash `` is being used, and the initial state +//! is the state of the parent hash. This is because by omitting [`ExecuteBlockCmd::block_at`], the +//! `--at` is used for both. This should be good enough for 99% of the cases. The only case where +//! you need to specify `block-at` and `block-ws-uri` is with snapshots. Let's say you have a file +//! `snap` and you know it corresponds to the state of the parent block of `X`. Then you'd do: +//! +//! ```ignore +//! RUST_LOG=runtime=trace,try-runtime::cli=trace,executor=trace \ +//! cargo run try-runtime \ +//! --execution Wasm \ +//! --chain polkadot-dev \ +//! --uri wss://rpc.polkadot.io \ +//! execute-block \ +//! --block-at \ +//! --block-ws-uri wss://rpc.polkadot.io \ +//! --overwrite-wasm-code \ +//! snap \ +//! -s snap \ +//! 
``` + +use parity_scale_codec::Decode; +use remote_externalities::{ + Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, TestExternalities, +}; use sc_chain_spec::ChainSpec; use sc_cli::{CliConfiguration, ExecutionStrategy, WasmExecutionMethod}; use sc_executor::NativeElseWasmExecutor; use sc_service::{Configuration, NativeExecutionDispatch}; use sp_core::{ - hashing::twox_128, offchain::{ testing::{TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, storage::{well_known_keys, StorageData, StorageKey}, + testing::TaskExecutor, + traits::TaskExecutorExt, + twox_128, H256, }; +use sp_externalities::Extensions; use sp_keystore::{testing::KeyStore, KeystoreExt}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_state_machine::StateMachine; -use std::{fmt::Debug, path::PathBuf, str::FromStr, sync::Arc}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_state_machine::{OverlayedChanges, StateMachine}; +use std::{fmt::Debug, path::PathBuf, str::FromStr}; -mod parse; +mod commands; +pub(crate) mod parse; +pub(crate) const LOG_TARGET: &'static str = "try-runtime::cli"; -/// Possible subcommands of `try-runtime`. +/// Possible commands of `try-runtime`. #[derive(Debug, Clone, structopt::StructOpt)] pub enum Command { - /// Execute "TryRuntime_on_runtime_upgrade" against the given runtime state. - OnRuntimeUpgrade(OnRuntimeUpgradeCmd), - /// Execute "OffchainWorkerApi_offchain_worker" against the given runtime state. - OffchainWorker(OffchainWorkerCmd), - /// Execute "Core_execute_block" using the given block and the runtime state of the parent - /// block. 
- ExecuteBlock(ExecuteBlockCmd), -} - -#[derive(Debug, Clone, structopt::StructOpt)] -pub struct OnRuntimeUpgradeCmd { - #[structopt(subcommand)] - pub state: State, -} - -#[derive(Debug, Clone, structopt::StructOpt)] -pub struct OffchainWorkerCmd { - #[structopt(subcommand)] - pub state: State, -} - -#[derive(Debug, Clone, structopt::StructOpt)] -pub struct ExecuteBlockCmd { - #[structopt(subcommand)] - pub state: State, + /// Execute the migrations of the "local runtime". + /// + /// This uses a custom runtime api call, namely "TryRuntime_on_runtime_upgrade". + /// + /// This always overwrites the wasm code with the local runtime (specified by `--chain`), to + /// ensure the new migrations are being executed. Re-executing already existing migrations is + /// evidently not very exciting. + OnRuntimeUpgrade(commands::on_runtime_upgrade::OnRuntimeUpgradeCmd), + + /// Executes the given block against some state. + /// + /// Unlike [`Command:::OnRuntimeUpgrade`], this command needs two inputs: the state, and the + /// block data. Since the state could be cached (see [`State::Snap`]), different flags are + /// provided for both. `--block-at` and `--block-uri`, if provided, are only used for fetching + /// the block. For convenience, these flags can be both emitted, if the [`State::Live`] is + /// being used. + /// + /// Note that by default, this command does not overwrite the code, so in wasm execution, the + /// live chain's code is used. This can be disabled if desired, see + /// [`ExecuteBlockCmd::overwrite_wasm_code`]. + /// + /// Note that if you do overwrite the wasm code, or generally use the local runtime for this, + /// you might + /// - not be able to decode the block, if the block format has changed. 
+ /// - quite possibly will get a signature verification failure, since the spec and + /// transaction version are part of the signature's payload, and if they differ between + /// your local runtime and the remote counterparts, the signatures cannot be verified. + /// - almost certainly will get a state root mismatch, since, well, you are executing a + /// different state transition function. + /// + /// To make testing slightly more dynamic, you can disable the state root check by enabling + /// [`ExecuteBlockCmd::no_check`]. If you get signature verification errors, you should + /// manually tweak your local runtime's spec version to fix this. + /// + /// A subtle detail of execute block is that if you want to execute block 100 of a live chain + /// again, you need to scrape the state of block 99. This is already done automatically if you + /// use [`State::Live`], and the parent hash of the target block is used to scrape the state. + /// If [`State::Snap`] is being used, then this needs to be manually taken into consideration. + /// + /// This executes the same runtime api as normal block import, namely `Core_execute_block`. If + /// [`ExecuteBlockCmd::no_check`] is set, it uses a custom, try-runtime-only runtime + /// api called `TryRuntime_execute_block_no_check`. + ExecuteBlock(commands::execute_block::ExecuteBlockCmd), + + /// Executes *the offchain worker hooks* of a given block against some state. + /// + /// Similar to [`Command:::ExecuteBlock`], this command needs two inputs: the state, and the + /// header data. Likewise, `--header-at` and `--header-uri` can be filled, or omitted if + /// [`State::Live`] is used. + /// + /// Similar to [`Command:::ExecuteBlock`], this command does not overwrite the code, so in wasm + /// execution, the live chain's code is used. This can be disabled if desired, see + /// [`OffchainWorkerCmd::overwrite_wasm_code`]. 
+ /// + /// This executes the same runtime api as normal block import, namely + /// `OffchainWorkerApi_offchain_worker`. + OffchainWorker(commands::offchain_worker::OffchainWorkerCmd), + + /// Follow the given chain's finalized blocks and apply all of its extrinsics. + /// + /// This is essentially repeated calls to [`Command::ExecuteBlock`], whilst the local runtime + /// is always at use, the state root check is disabled, and the state is persisted between + /// executions. + /// + /// This allows the behavior of a new runtime to be inspected over a long period of time, with + /// realistic transactions coming as input. + /// + /// NOTE: this does NOT execute the offchain worker hooks of mirrored blocks. This might be + /// added in the future. + /// + /// This does not support snapshot states, and can only work with a remote chain. Upon first + /// connections, starts listening for finalized block events. Upon first block notification, it + /// initializes the state from the remote node, and starts applying that block, plus all the + /// blocks that follow, to the same growing state. + FollowChain(commands::follow_chain::FollowChainCmd), } +/// Shared parameters of the `try-runtime` commands #[derive(Debug, Clone, structopt::StructOpt)] pub struct SharedParams { - /// The shared parameters + /// Shared parameters of substrate cli. #[allow(missing_docs)] #[structopt(flatten)] pub shared_params: sc_cli::SharedParams, - /// The execution strategy that should be used for benchmarks + /// The execution strategy that should be used. #[structopt( long = "execution", value_name = "STRATEGY", @@ -85,7 +387,7 @@ pub struct SharedParams { )] pub execution: ExecutionStrategy, - /// Method for executing Wasm runtime code. + /// Type of wasm execution used. #[structopt( long = "wasm-execution", value_name = "METHOD", @@ -96,52 +398,18 @@ pub struct SharedParams { pub wasm_method: WasmExecutionMethod, /// The number of 64KB pages to allocate for Wasm execution. 
Defaults to - /// sc_service::Configuration.default_heap_pages. + /// [`sc_service::Configuration.default_heap_pages`]. #[structopt(long)] pub heap_pages: Option, - /// The block hash at which to read state. This is required for execute-block, offchain-worker, - /// or any command that used the live subcommand. - #[structopt( - short, - long, - multiple = false, - parse(try_from_str = parse::hash), - required_ifs( - &[("command", "offchain-worker"), ("command", "execute-block"), ("subcommand", "live")] - ) - )] - block_at: String, - - /// Whether or not to overwrite the code from state with the code from - /// the specified chain spec. + /// When enabled, the spec name check will not panic, and instead only show a warning. #[structopt(long)] - pub overwrite_code: bool, - - /// The url to connect to. - // TODO having this a shared parm is a temporary hack; the url is used just - // to get the header/block. We should try and get that out of state, OR allow - // the user to feed in a header/block via file. - // https://github.com/paritytech/substrate/issues/9027 - #[structopt(short, long, default_value = "ws://localhost:9944", parse(try_from_str = parse::url))] - url: String, + pub no_spec_name_check: bool, } -impl SharedParams { - /// Get the configured value of `block_at`, interpreted as the hash type of `Block`. - pub fn block_at(&self) -> sc_cli::Result - where - Block: BlockT, - ::Hash: FromStr, - <::Hash as FromStr>::Err: Debug, - { - self.block_at - .parse::<::Hash>() - .map_err(|e| format!("Could not parse block hash: {:?}", e).into()) - } -} - -/// Various commands to try out against runtime state at a specific block. +/// Our `try-runtime` command. +/// +/// See [`Command`] for more info. #[derive(Debug, Clone, structopt::StructOpt)] pub struct TryRuntimeCmd { #[structopt(flatten)] @@ -151,11 +419,12 @@ pub struct TryRuntimeCmd { pub command: Command, } -/// The source of runtime state to try operations against. +/// The source of runtime *state* to use. 
#[derive(Debug, Clone, structopt::StructOpt)] pub enum State { - /// Use a state snapshot as the source of runtime state. NOTE: for the offchain-worker and - /// execute-block command this is only partially supported and requires a archive node url. + /// Use a state snapshot as the source of runtime state. + /// + /// This can be crated by passing a value to [`State::Live::snapshot_path`]. Snap { #[structopt(short, long)] snapshot_path: PathBuf, @@ -163,285 +432,80 @@ pub enum State { /// Use a live chain as the source of runtime state. Live { + /// The url to connect to. + #[structopt( + short, + long, + parse(try_from_str = parse::url), + )] + uri: String, + + /// The block hash at which to fetch the state. + /// + /// If non provided, then the latest finalized head is used. This is particularly useful + /// for [`Command::OnRuntimeUpgrade`]. + #[structopt( + short, + long, + multiple = false, + parse(try_from_str = parse::hash), + )] + at: Option, + /// An optional state snapshot file to WRITE to. Not written if set to `None`. #[structopt(short, long)] snapshot_path: Option, - /// The modules to scrape. If empty, entire chain state will be scraped. + /// The pallets to scrape. If empty, entire chain state will be scraped. 
#[structopt(short, long, require_delimiter = true)] - modules: Option>, + pallets: Option>, }, } -async fn on_runtime_upgrade( - shared: SharedParams, - command: OnRuntimeUpgradeCmd, - config: Configuration, -) -> sc_cli::Result<()> -where - Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: FromStr, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, -{ - let wasm_method = shared.wasm_method; - let execution = shared.execution; - let heap_pages = shared.heap_pages.or(config.default_heap_pages); - - let mut changes = Default::default(); - let max_runtime_instances = config.max_runtime_instances; - let executor = NativeElseWasmExecutor::::new( - wasm_method.into(), - heap_pages, - max_runtime_instances, - ); - - check_spec_name::(shared.url.clone(), config.chain_spec.name().to_string()).await; - - let ext = { - let builder = match command.state { +impl State { + /// Create the [`remote_externalities::Builder`] from self. + pub(crate) fn builder(&self) -> sc_cli::Result> + where + Block::Hash: FromStr, + ::Err: Debug, + { + Ok(match self { State::Snap { snapshot_path } => Builder::::new().mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), })), - State::Live { snapshot_path, modules } => - Builder::::new().mode(Mode::Online(OnlineConfig { - transport: shared.url.to_owned().into(), - state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - modules: modules.to_owned().unwrap_or_default(), - at: Some(shared.block_at::()?), - ..Default::default() - })), - }; - - let (code_key, code) = extract_code(config.chain_spec)?; - builder - .inject_key_value(&[(code_key, code)]) - .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()) - .build() - .await? 
- }; - - let encoded_result = StateMachine::<_, _, NumberFor, _>::new( - &ext.backend, - None, - &mut changes, - &executor, - "TryRuntime_on_runtime_upgrade", - &[], - ext.extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, - sp_core::testing::TaskExecutor::new(), - ) - .execute(execution.into()) - .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade': {:?}", e))?; - - let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode output: {:?}", e))?; - log::info!( - "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = {}, total weight = {} ({})", - weight, - total_weight, - weight as f64 / total_weight as f64 - ); - - Ok(()) -} - -async fn offchain_worker( - shared: SharedParams, - command: OffchainWorkerCmd, - config: Configuration, -) -> sc_cli::Result<()> -where - Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: FromStr, - Block::Header: serde::de::DeserializeOwned, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, -{ - let wasm_method = shared.wasm_method; - let execution = shared.execution; - let heap_pages = shared.heap_pages.or(config.default_heap_pages); - - let mut changes = Default::default(); - let max_runtime_instances = config.max_runtime_instances; - let executor = NativeElseWasmExecutor::::new( - wasm_method.into(), - heap_pages, - max_runtime_instances, - ); - - check_spec_name::(shared.url.clone(), config.chain_spec.name().to_string()).await; - - let mode = match command.state { - State::Live { snapshot_path, modules } => { - let at = shared.block_at::()?; - let online_config = OnlineConfig { - transport: shared.url.to_owned().into(), - state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - modules: modules.to_owned().unwrap_or_default(), - at: Some(at), - ..Default::default() - }; - - Mode::Online(online_config) - }, 
- State::Snap { snapshot_path } => { - let mode = - Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path) }); - - mode - }, - }; - let builder = Builder::::new() - .mode(mode) - .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); - let mut ext = if shared.overwrite_code { - let (code_key, code) = extract_code(config.chain_spec)?; - builder.inject_key_value(&[(code_key, code)]).build().await? - } else { - builder.inject_hashed_key(well_known_keys::CODE).build().await? - }; - - let (offchain, _offchain_state) = TestOffchainExt::new(); - let (pool, _pool_state) = TestTransactionPoolExt::new(); - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); - ext.register_extension(TransactionPoolExt::new(pool)); - - let header_hash = shared.block_at::()?; - let header = rpc_api::get_header::(shared.url, header_hash).await?; - - let _ = StateMachine::<_, _, NumberFor, _>::new( - &ext.backend, - None, - &mut changes, - &executor, - "OffchainWorkerApi_offchain_worker", - header.encode().as_ref(), - ext.extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, - sp_core::testing::TaskExecutor::new(), - ) - .execute(execution.into()) - .map_err(|e| format!("failed to execute 'OffchainWorkerApi_offchain_worker': {:?}", e))?; - - log::info!("OffchainWorkerApi_offchain_worker executed without errors."); - - Ok(()) -} - -async fn execute_block( - shared: SharedParams, - command: ExecuteBlockCmd, - config: Configuration, -) -> sc_cli::Result<()> -where - Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: FromStr, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - ExecDispatch: NativeExecutionDispatch + 'static, -{ - let wasm_method = shared.wasm_method; - let execution = shared.execution; - let heap_pages = 
shared.heap_pages.or(config.default_heap_pages); - - let mut changes = Default::default(); - let max_runtime_instances = config.max_runtime_instances; - let executor = NativeElseWasmExecutor::::new( - wasm_method.into(), - heap_pages, - max_runtime_instances, - ); - - let block_hash = shared.block_at::()?; - let block: Block = rpc_api::get_block::(shared.url.clone(), block_hash).await?; - - check_spec_name::(shared.url.clone(), config.chain_spec.name().to_string()).await; - - let mode = match command.state { - State::Snap { snapshot_path } => { - let mode = - Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path) }); - - mode - }, - State::Live { snapshot_path, modules } => { - let parent_hash = block.header().parent_hash(); - - let mode = Mode::Online(OnlineConfig { - transport: shared.url.to_owned().into(), - state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - modules: modules.to_owned().unwrap_or_default(), - at: Some(parent_hash.to_owned()), - ..Default::default() - }); - - mode - }, - }; - - let ext = { - let builder = Builder::::new() - .mode(mode) - .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); - let mut ext = if shared.overwrite_code { - let (code_key, code) = extract_code(config.chain_spec)?; - builder.inject_key_value(&[(code_key, code)]).build().await? - } else { - builder.inject_hashed_key(well_known_keys::CODE).build().await? - }; - - // register externality extensions in order to provide host interface for OCW to the - // runtime. 
- let (offchain, _offchain_state) = TestOffchainExt::new(); - let (pool, _pool_state) = TestTransactionPoolExt::new(); - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); - ext.register_extension(TransactionPoolExt::new(pool)); - - ext - }; - - // A digest item gets added when the runtime is processing the block, so we need to pop - // the last one to be consistent with what a gossiped block would contain. - let (mut header, extrinsics) = block.deconstruct(); - header.digest_mut().pop(); - let block = Block::new(header, extrinsics); - - let _encoded_result = StateMachine::<_, _, NumberFor, _>::new( - &ext.backend, - None, - &mut changes, - &executor, - "Core_execute_block", - block.encode().as_ref(), - ext.extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, - sp_core::testing::TaskExecutor::new(), - ) - .execute(execution.into()) - .map_err(|e| format!("failed to execute 'Core_execute_block': {:?}", e))?; - debug_assert!(_encoded_result == vec![1]); - - log::info!("Core_execute_block executed without errors."); + State::Live { snapshot_path, pallets, uri, at } => { + let at = match at { + Some(at_str) => Some(hash_of::(at_str)?), + None => None, + }; + Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: uri.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + pallets: pallets.to_owned().unwrap_or_default(), + at, + })) + .inject_hashed_key( + &[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat(), + ) + }, + }) + } - Ok(()) + /// Get the uri, if self is `Live`. + pub(crate) fn live_uri(&self) -> Option { + match self { + State::Live { uri, .. 
} => Some(uri.clone()), + _ => None, + } + } } impl TryRuntimeCmd { pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> where - Block: BlockT + serde::de::DeserializeOwned, + Block: BlockT + serde::de::DeserializeOwned, Block::Header: serde::de::DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, @@ -451,13 +515,33 @@ impl TryRuntimeCmd { { match &self.command { Command::OnRuntimeUpgrade(ref cmd) => - on_runtime_upgrade::(self.shared.clone(), cmd.clone(), config) - .await, + commands::on_runtime_upgrade::on_runtime_upgrade::( + self.shared.clone(), + cmd.clone(), + config, + ) + .await, Command::OffchainWorker(cmd) => - offchain_worker::(self.shared.clone(), cmd.clone(), config) - .await, + commands::offchain_worker::offchain_worker::( + self.shared.clone(), + cmd.clone(), + config, + ) + .await, Command::ExecuteBlock(cmd) => - execute_block::(self.shared.clone(), cmd.clone(), config).await, + commands::execute_block::execute_block::( + self.shared.clone(), + cmd.clone(), + config, + ) + .await, + Command::FollowChain(cmd) => + commands::follow_chain::follow_chain::( + self.shared.clone(), + cmd.clone(), + config, + ) + .await, } } } @@ -477,7 +561,7 @@ impl CliConfiguration for TryRuntimeCmd { /// Extract `:code` from the given chain spec and return as `StorageData` along with the /// corresponding `StorageKey`. -fn extract_code(spec: Box) -> sc_cli::Result<(StorageKey, StorageData)> { +pub(crate) fn extract_code(spec: &Box) -> sc_cli::Result<(StorageKey, StorageData)> { let genesis_storage = spec.build_storage()?; let code = StorageData( genesis_storage @@ -491,31 +575,142 @@ fn extract_code(spec: Box) -> sc_cli::Result<(StorageKey, Storage Ok((code_key, code)) } +/// Get the hash type of the generic `Block` from a `hash_str`. 
+pub(crate) fn hash_of(hash_str: &str) -> sc_cli::Result +where + Block::Hash: FromStr, + ::Err: Debug, +{ + hash_str + .parse::<::Hash>() + .map_err(|e| format!("Could not parse block hash: {:?}", e).into()) +} + /// Check the spec_name of an `ext` /// -/// If the version does not exist, or if it does not match with the given, it emits a warning. -async fn check_spec_name( +/// If the spec names don't match, if `relaxed`, then it emits a warning, else it panics. +/// If the spec versions don't match, it only ever emits a warning. +pub(crate) async fn ensure_matching_spec( uri: String, expected_spec_name: String, + expected_spec_version: u32, + relaxed: bool, ) { - let expected_spec_name = expected_spec_name.to_lowercase(); match remote_externalities::rpc_api::get_runtime_version::(uri.clone(), None) .await - .map(|version| String::from(version.spec_name.clone())) - .map(|spec_name| spec_name.to_lowercase()) + .map(|version| (String::from(version.spec_name.clone()), version.spec_version)) + .map(|(spec_name, spec_version)| (spec_name.to_lowercase(), spec_version)) { - Ok(spec) if spec == expected_spec_name => { - log::debug!("found matching spec name: {:?}", spec); - }, - Ok(spec) => { - log::warn!( - "version mismatch: remote spec name: '{}', expected (local chain spec, aka. `--chain`): '{}'", - spec, - expected_spec_name, - ); + Ok((name, version)) => { + // first, deal with spec name + if expected_spec_name == name { + log::info!(target: LOG_TARGET, "found matching spec name: {:?}", name); + } else { + let msg = format!( + "version mismatch: remote spec name: '{}', expected (local chain spec, aka. `--chain`): '{}'", + name, + expected_spec_name + ); + if relaxed { + log::warn!(target: LOG_TARGET, "{}", msg); + } else { + panic!("{}", msg); + } + } + + if expected_spec_version == version { + log::info!(target: LOG_TARGET, "found matching spec version: {:?}", version); + } else { + log::warn!( + target: LOG_TARGET, + "spec version mismatch (local {} != remote {}). 
This could cause some issues.", + expected_spec_version, + version + ); + } }, Err(why) => { - log::error!("failed to fetch runtime version from {}: {:?}", uri, why); + log::error!( + target: LOG_TARGET, + "failed to fetch runtime version from {}: {:?}. Skipping the check", + uri, + why + ); }, } } + +/// Build all extensions that we typically use. +pub(crate) fn full_extensions() -> Extensions { + let mut extensions = Extensions::default(); + extensions.register(TaskExecutorExt::new(TaskExecutor::new())); + let (offchain, _offchain_state) = TestOffchainExt::new(); + let (pool, _pool_state) = TestTransactionPoolExt::new(); + extensions.register(OffchainDbExt::new(offchain.clone())); + extensions.register(OffchainWorkerExt::new(offchain)); + extensions.register(KeystoreExt(std::sync::Arc::new(KeyStore::new()))); + extensions.register(TransactionPoolExt::new(pool)); + + extensions +} + +/// Build a default execution that we typically use. +pub(crate) fn build_executor( + shared: &SharedParams, + config: &sc_service::Configuration, +) -> NativeElseWasmExecutor { + let wasm_method = shared.wasm_method; + let heap_pages = shared.heap_pages.or(config.default_heap_pages); + let max_runtime_instances = config.max_runtime_instances; + + NativeElseWasmExecutor::::new(wasm_method.into(), heap_pages, max_runtime_instances) +} + +/// Execute the given `method` and `data` on top of `ext`, returning the results (encoded) and the +/// state `changes`. 
+pub(crate) fn state_machine_call( + ext: &TestExternalities, + executor: &NativeElseWasmExecutor, + execution: sc_cli::ExecutionStrategy, + method: &'static str, + data: &[u8], + extensions: Extensions, +) -> sc_cli::Result<(OverlayedChanges, Vec)> { + let mut changes = Default::default(); + let encoded_results = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + executor, + method, + data, + extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade': {:?}", e)) + .map_err::(Into::into)?; + + Ok((changes, encoded_results)) +} + +/// Get the spec `(name, version)` from the local runtime. +pub(crate) fn local_spec( + ext: &TestExternalities, + executor: &NativeElseWasmExecutor, +) -> (String, u32) { + let (_, encoded) = state_machine_call::( + &ext, + &executor, + sc_cli::ExecutionStrategy::NativeElseWasm, + "Core_version", + &[], + Default::default(), + ) + .expect("all runtimes should have version; qed"); + ::decode(&mut &*encoded) + .map_err(|e| format!("failed to decode output: {:?}", e)) + .map(|v| (v.spec_name.into(), v.spec_version)) + .expect("all runtimes should have version; qed") +} diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 4a6cec2cac774..8b647d6282fba 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -17,7 +17,7 @@ log = "0.4.8" prometheus = { version = "0.11.0", default-features = false } futures-util = { version = "0.3.17", default-features = false, features = ["io"] } derive_more = "0.99" -async-std = { version = "1.6.5", features = ["unstable"] } +async-std = { version = "1.10.0", features = ["unstable"] } tokio = "1.10" hyper = { version = "0.14.11", default-features = false, features = ["http1", "server", "tcp"] } diff --git a/utils/wasm-builder/src/wasm_project.rs 
b/utils/wasm-builder/src/wasm_project.rs index 868692d341ff0..3806a890a1064 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -436,6 +436,10 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman // exclusive). The runner project is created in `CARGO_TARGET_DIR` and executing it will // create a sub target directory inside of `CARGO_TARGET_DIR`. .env_remove("CARGO_TARGET_DIR") + // As we are being called inside a build-script, this env variable is set. However, we set + // our own `RUSTFLAGS` and thus, we need to remove this. Otherwise cargo favors this + // env variable. + .env_remove("CARGO_ENCODED_RUSTFLAGS") // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, "");