From 6df8909d94482bf4aa586086a28f5befc34676e7 Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Mon, 11 Sep 2023 16:39:51 +0100 Subject: [PATCH 01/16] [improve docs]: Sudo Pallet (#1209) * refactor docs * add docify * nit * add #![deny(missing_docs)] * Apply suggestions from code review Co-authored-by: Juan Co-authored-by: Francisco Aguirre --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Juan Co-authored-by: Francisco Aguirre --- Cargo.lock | 1 + substrate/frame/sudo/Cargo.toml | 2 + substrate/frame/sudo/src/lib.rs | 119 ++++++++++++++++++---------- substrate/frame/sudo/src/tests.rs | 3 + substrate/frame/sudo/src/weights.rs | 2 +- 5 files changed, 82 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e675e0f3a28a3..5d21b24804a3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10510,6 +10510,7 @@ dependencies = [ name = "pallet-sudo" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index a75a0c504f983..9b148c0b471dc 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -22,6 +22,8 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} +docify = "0.2.1" + [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index f735469558c70..0c869bec7f076 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,41 +15,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Sudo Pallet -//! -//! - [`Config`] -//! - [`Call`] -//! -//! ## Overview -//! -//! The Sudo pallet allows for a single account (called the "sudo key") -//! to execute dispatchable functions that require a `Root` call -//! or designate a new account to replace them as the sudo key. -//! Only one account can be the sudo key at a time. -//! -//! ## Interface +//! > Made with *Substrate*, for *Polkadot*. //! -//! ### Dispatchable Functions +//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/sudo) +//! [![polkadot]](https://polkadot.network) //! -//! Only the sudo key can call the dispatchable functions from the Sudo pallet. +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white //! -//! * `sudo` - Make a `Root` call to a dispatchable function. -//! * `set_key` - Assign a new account to be the sudo key. +//! # Sudo Pallet //! -//! ## Usage +//! A pallet to provide a way to execute privileged runtime calls using a specified sudo ("superuser +//! do") account. //! -//! ### Executing Privileged Functions +//! ## Pallet API //! -//! The Sudo pallet itself is not intended to be used within other pallets. -//! 
Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in
-//! other pallets. You can execute these privileged functions by calling `sudo` with the sudo key
-//! account. Privileged functions cannot be directly executed via an extrinsic.
+//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
+//! including its configuration trait, dispatchables, storage items, events and errors.
 //!
-//! Learn more about privileged functions and `Root` origin in the [`Origin`] type documentation.
+//! ## Overview
 //!
-//! ### Simple Code Snippet
+//! In Substrate blockchains, pallets may contain dispatchable calls that can only be called at
+//! the system level of the chain (i.e. dispatchables that require a `Root` origin).
+//! Setting a privileged account, called the _sudo key_, allows you to make such calls as an
+//! extrinsic.
 //!
-//! This is an example of a pallet that exposes a privileged function:
+//! Here's an example of a privileged function in another pallet:
 //!
 //! ```
 //! #[frame_support::pallet]
@@ -76,27 +67,58 @@
 //! }
 //! }
 //! }
-//! # fn main() {}
 //! ```
 //!
-//! ### Signed Extension
+//! With the Sudo pallet configured in your chain's runtime, you can execute this privileged
+//! function by constructing a call using the [`sudo`](Pallet::sudo) dispatchable.
+//!
+//! To use this pallet in your runtime, a sudo key must be specified in the [`GenesisConfig`] of
+//! the pallet. You can change this key at any time once your chain is live using the
+//! [`set_key`](Pallet::set_key) dispatchable; however, only one sudo key can be set at a
+//! time. The pallet also allows you to make a call using
+//! [`sudo_unchecked_weight`](Pallet::sudo_unchecked_weight), which allows the sudo account to
+//! execute a call with a custom weight.
+//!
+//!
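+//! As a rough sketch (the runtime wiring below is an assumption for illustration, not part of
+//! this patch), declaring the pallet in a runtime and picking the initial sudo key in genesis
+//! might look like:
+//!
+//! ```ignore
+//! // Runtime-side configuration of the pallet.
+//! impl pallet_sudo::Config for Runtime {
+//!     type RuntimeEvent = RuntimeEvent;
+//!     type RuntimeCall = RuntimeCall;
+//!     type WeightInfo = pallet_sudo::weights::SubstrateWeight<Runtime>;
+//! }
+//!
+//! // Chain-spec side: the initial sudo key is set through the pallet's `GenesisConfig`.
+//! let sudo_genesis = pallet_sudo::GenesisConfig::<Runtime> { key: Some(root_account) };
+//! ```
+//!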
+//! Note: this pallet is not meant to be used inside other pallets. It is only
+//! meant to be used by constructing runtime calls from outside the runtime.
+//! 
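+//! For example, a client-side tool might construct the call to submit (signed by the sudo key)
+//! roughly as follows; the inner call here is a placeholder, not a real pallet in this patch:
+//!
+//! ```ignore
+//! // Wrap a `Root`-only inner call so it can be dispatched by the sudo key.
+//! let inner: RuntimeCall = SomePalletCall::privileged_function {}.into();
+//! let call = RuntimeCall::Sudo(pallet_sudo::Call::sudo { call: Box::new(inner) });
+//! ```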
+//! +//! This pallet also defines a [`SignedExtension`](sp_runtime::traits::SignedExtension) called +//! [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are +//! accepted by the transaction pool. The intended use of this signed extension is to prevent other +//! accounts from spamming the transaction pool for the initial phase of a chain, during which +//! developers may only want a sudo account to be able to make transactions. +//! +//! Learn more about the `Root` origin in the [`RawOrigin`](frame_system::RawOrigin) type +//! documentation. //! -//! The Sudo pallet defines the following extension: +//! ### Examples //! -//! - [`CheckOnlySudoAccount`]: Ensures that the signed transactions are only valid if they are -//! signed by sudo account. +//! 1. You can make a privileged runtime call using `sudo` with an account that matches the sudo +//! key. +#![doc = docify::embed!("src/tests.rs", sudo_basics)] //! -//! ## Genesis Config +//! 2. Only an existing sudo key can set a new one. +#![doc = docify::embed!("src/tests.rs", set_key_basics)] //! -//! The Sudo pallet depends on the [`GenesisConfig`]. -//! You need to set an initial superuser account as the sudo `key`. +//! 3. You can also make non-privileged calls using `sudo_as`. +#![doc = docify::embed!("src/tests.rs", sudo_as_emits_events_correctly)] //! -//! ## Related Pallets +//! ## Low Level / Implementation Details //! -//! * [Democracy](../pallet_democracy/index.html) +//! This pallet checks that the caller of its dispatchables is a signed account and ensures that the +//! caller matches the sudo key in storage. +//! A caller of this pallet's dispatchables does not pay any fees to dispatch a call. If the account +//! making one of these calls is not the sudo key, the pallet returns a [`Error::RequireSudo`] +//! error. //! -//! [`Origin`]: https://docs.substrate.io/main-docs/build/origins/ +//! Once an origin is verified, sudo calls use `dispatch_bypass_filter` from the +//! [`UnfilteredDispatchable`](frame_support::traits::UnfilteredDispatchable) trait to allow call +//! execution without enforcing any further origin checks. +#![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] use sp_runtime::{traits::StaticLookup, DispatchResult}; @@ -261,12 +283,21 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A sudo just took place. \[result\] - Sudid { sudo_result: DispatchResult }, - /// The \[sudoer\] just switched identity; the old key is supplied if one existed. - KeyChanged { old_sudoer: Option }, - /// A sudo just took place. \[result\] - SudoAsDone { sudo_result: DispatchResult }, + /// A sudo call just took place. + Sudid { + /// The result of the call made by the sudo user. + sudo_result: DispatchResult, + }, + /// The sudo key has been updated. + KeyChanged { + /// The old sudo key if one was previously set. + old_sudoer: Option, + }, + /// A [sudo_as](Pallet::sudo_as) call just took place. + SudoAsDone { + /// The result of the call made by the sudo user. + sudo_result: DispatchResult, + }, } #[pallet::error] diff --git a/substrate/frame/sudo/src/tests.rs b/substrate/frame/sudo/src/tests.rs index c854fed8f0736..6963ba2e6a051 100644 --- a/substrate/frame/sudo/src/tests.rs +++ b/substrate/frame/sudo/src/tests.rs @@ -34,6 +34,7 @@ fn test_setup_works() { }); } +#[docify::export] #[test] fn sudo_basics() { // Configure a default test environment and set the root `key` to 1. 
@@ -134,6 +135,7 @@ fn sudo_unchecked_weight_emits_events_correctly() { }) } +#[docify::export] #[test] fn set_key_basics() { new_test_ext(1).execute_with(|| { @@ -195,6 +197,7 @@ fn sudo_as_basics() { }); } +#[docify::export] #[test] fn sudo_as_emits_events_correctly() { new_test_ext(1).execute_with(|| { diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs index 6a0197d1469b4..0cdd0c8a81f2c 100644 --- a/substrate/frame/sudo/src/weights.rs +++ b/substrate/frame/sudo/src/weights.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_sudo +//! Autogenerated weights for pallet_sudo. //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` From 4b8bd9060e66932f5e038e16478c089191b19723 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 11 Sep 2023 17:54:27 +0200 Subject: [PATCH 02/16] Delete staking miner (#1480) * Delete staking miner New repo should be used instead https://github.com/paritytech/staking-miner-v2 Signed-off-by: Oliver Tale-Yazdi * Remove staking-miner CI jobs Signed-off-by: Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi --- .../workflows/release-50_publish-docker.yml | 13 - .gitlab/pipeline/build.yml | 22 +- .gitlab/pipeline/publish.yml | 18 - Cargo.lock | 120 +--- Cargo.toml | 1 - .../staking-miner_builder.Dockerfile | 46 -- .../staking-miner_injected.Dockerfile | 43 -- docker/scripts/staking-miner/README.md | 37 - .../scripts/staking-miner/build-injected.sh | 13 - docker/scripts/staking-miner/build.sh | 13 - .../staking-miner_Dockerfile.README.md | 3 - .../staking-miner_builder.Dockerfile | 43 -- docker/scripts/staking-miner/test-build.sh | 18 - polkadot/utils/staking-miner/.gitignore | 2 - polkadot/utils/staking-miner/Cargo.toml | 54 -- polkadot/utils/staking-miner/README.md | 81 --- polkadot/utils/staking-miner/src/dry_run.rs | 166 ----- .../staking-miner/src/emergency_solution.rs | 65 -- polkadot/utils/staking-miner/src/main.rs | 665 ------------------ polkadot/utils/staking-miner/src/monitor.rs | 478 ------------- polkadot/utils/staking-miner/src/opts.rs | 366 ---------- polkadot/utils/staking-miner/src/prelude.rs | 55 -- polkadot/utils/staking-miner/src/rpc.rs | 182 ----- .../staking-miner/src/runtime_versions.rs | 90 --- polkadot/utils/staking-miner/src/signer.rs | 84 --- polkadot/utils/staking-miner/tests/cli.rs | 49 -- .../election-provider-multi-phase/src/lib.rs | 3 +- 27 files changed, 8 insertions(+), 2722 deletions(-) delete mode 100644 docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile delete mode 100644 docker/dockerfiles/staking-miner/staking-miner_injected.Dockerfile delete mode 100644 docker/scripts/staking-miner/README.md delete mode 100755 docker/scripts/staking-miner/build-injected.sh delete mode 100755 docker/scripts/staking-miner/build.sh delete mode 100644 docker/scripts/staking-miner/staking-miner_Dockerfile.README.md delete mode 100644 docker/scripts/staking-miner/staking-miner_builder.Dockerfile delete mode 100755 docker/scripts/staking-miner/test-build.sh delete mode 100644 polkadot/utils/staking-miner/.gitignore delete mode 100644 polkadot/utils/staking-miner/Cargo.toml delete mode 100644 polkadot/utils/staking-miner/README.md delete mode 100644 polkadot/utils/staking-miner/src/dry_run.rs delete mode 100644 
polkadot/utils/staking-miner/src/emergency_solution.rs delete mode 100644 polkadot/utils/staking-miner/src/main.rs delete mode 100644 polkadot/utils/staking-miner/src/monitor.rs delete mode 100644 polkadot/utils/staking-miner/src/opts.rs delete mode 100644 polkadot/utils/staking-miner/src/prelude.rs delete mode 100644 polkadot/utils/staking-miner/src/rpc.rs delete mode 100644 polkadot/utils/staking-miner/src/runtime_versions.rs delete mode 100644 polkadot/utils/staking-miner/src/signer.rs delete mode 100644 polkadot/utils/staking-miner/tests/cli.rs diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 512b91aa6e554..7fdfc23035474 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -47,7 +47,6 @@ on: type: choice options: - polkadot - - staking-miner - polkadot-parachain permissions: @@ -158,18 +157,6 @@ jobs: echo "tag=latest" >> $GITHUB_OUTPUT echo "release=${release}" >> $GITHUB_OUTPUT - - name: Build Injected Container image for polkadot/staking-miner - if: ${{ env.BINARY == 'polkadot' || env.BINARY == 'staking-miner' }} - env: - ARTIFACTS_FOLDER: ./release-artifacts - IMAGE_NAME: ${{ env.BINARY }} - OWNER: ${{ env.DOCKER_OWNER }} - TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} - run: | - ls -al - echo "Building container for $BINARY" - ./docker/scripts/build-injected.sh - - name: Build Injected Container image for polkadot-parachain if: ${{ env.BINARY == 'polkadot-parachain' }} env: diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index c4dfc0dd0931c..328b37af1d4de 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -77,26 +77,6 @@ build-malus: - echo "polkadot-test-malus = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - cp -r ./docker/* ./artifacts -build-staking-miner: - stage: build - extends: - - .docker-env - - .common-refs - # - .collect-artifacts - # DAG - needs: - - job: build-malus - artifacts: false - script: - - time cargo build -q --locked --release --package staging-staking-miner - # # pack artifacts - # - mkdir -p ./artifacts - # - mv ./target/release/staking-miner ./artifacts/. 
- # - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION - # - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - # - echo "staking-miner = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - # - cp -r ./scripts/* ./artifacts - build-rustdoc: stage: build extends: @@ -358,7 +338,7 @@ build-subkey-linux: extends: .build-subkey # DAG needs: - - job: build-staking-miner + - job: build-malus artifacts: false # tbd # build-subkey-macos: diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 1a513e5970d5a..9e24b8606a4de 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -328,24 +328,6 @@ build-push-image-substrate-pr: # # this artifact is used in zombienet-tests job # dotenv: ./artifacts/malus.env -# publish-staking-miner-image: -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image -# - .publish-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# # scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile -# DOCKERFILE: ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile -# IMAGE_NAME: docker.io/paritytech/staking-miner -# GIT_STRATEGY: none -# DOCKER_USER: ${Docker_Hub_User_Parity} -# DOCKER_PASS: ${Docker_Hub_Pass_Parity} -# needs: -# - job: build-staking-miner -# artifacts: true - # substrate # publish-substrate-image-pr: diff --git a/Cargo.lock b/Cargo.lock index 5d21b24804a3d..3d4bc17563ba7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -494,7 +494,7 @@ dependencies = [ "ark-ff", "ark-std", "tracing", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -4838,12 +4838,6 @@ dependencies = [ "futures", ] -[[package]] -name = "exitcode" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193" - [[package]] name = "expander" version = "0.0.4" @@ -7690,15 +7684,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata 0.1.10", -] - [[package]] name = "matches" version = "0.1.10" @@ -8542,16 +8527,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - [[package]] name = "num" version = "0.4.1" @@ -8771,12 +8746,6 @@ version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "owo-colors" version = "3.5.0" @@ -15117,7 +15086,7 @@ dependencies = [ "substrate-test-runtime", "tempfile", "tracing", - "tracing-subscriber 0.2.25", + "tracing-subscriber", "wat", ] @@ -15815,7 +15784,7 @@ dependencies = [ "thiserror", "tracing", "tracing-log", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -16394,18 +16363,6 @@ dependencies = [ 
"libc", ] -[[package]] -name = "signal-hook-tokio" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213241f76fb1e37e27de3b6aa1b068a2c333233b59cca6634f634b80a27ecf1e" -dependencies = [ - "futures-core", - "libc", - "signal-hook", - "tokio", -] - [[package]] name = "signature" version = "1.6.4" @@ -17525,7 +17482,7 @@ dependencies = [ "sp-std", "tracing", "tracing-core", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -17807,47 +17764,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "staging-staking-miner" -version = "1.0.0" -dependencies = [ - "assert_cmd", - "clap 4.4.2", - "exitcode", - "frame-election-provider-support", - "frame-remote-externalities", - "frame-support", - "frame-system", - "futures-util", - "jsonrpsee", - "log", - "pallet-balances", - "pallet-election-provider-multi-phase", - "pallet-staking", - "pallet-transaction-payment", - "parity-scale-codec", - "paste", - "polkadot-core-primitives", - "polkadot-runtime", - "polkadot-runtime-common", - "sc-transaction-pool-api", - "serde", - "serde_json", - "signal-hook", - "signal-hook-tokio", - "sp-core", - "sp-npos-elections", - "sp-runtime", - "sp-state-machine", - "sp-version", - "staging-kusama-runtime", - "sub-tokens", - "thiserror", - "tokio", - "tracing-subscriber 0.3.17", - "westend-runtime", -] - [[package]] name = "staging-xcm" version = "1.0.0" @@ -18022,14 +17938,6 @@ dependencies = [ "webrtc-util", ] -[[package]] -name = "sub-tokens" -version = "0.1.0" -source = "git+https://github.com/paritytech/substrate-debug-kit?branch=master#e12503ab781e913735dc389865a3b8b4a6c6399d" -dependencies = [ - "separator", -] - [[package]] name = "subkey" version = "3.0.0" @@ -19111,7 +19019,7 @@ dependencies = [ "ansi_term", "chrono", "lazy_static", - "matchers 0.0.1", + "matchers", "parking_lot 0.11.2", "regex", "serde", @@ -19125,24 +19033,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "tracing-subscriber" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" -dependencies = [ - "matchers 0.1.0", - "nu-ansi-term", - "once_cell", - "regex", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - [[package]] name = "trie-bench" version = "0.37.0" diff --git a/Cargo.toml b/Cargo.toml index 5c43990d47299..89fb007058aa3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -172,7 +172,6 @@ members = [ "polkadot/statement-table", "polkadot/utils/generate-bags", "polkadot/utils/remote-ext-tests/bags-list", - "polkadot/utils/staking-miner", "polkadot/xcm", "polkadot/xcm/pallet-xcm", "polkadot/xcm/pallet-xcm-benchmarks", diff --git a/docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile b/docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile deleted file mode 100644 index a1932095fd4ca..0000000000000 --- a/docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM paritytech/ci-linux:production as builder - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME="staking-miner" -ARG PROFILE=release - -LABEL description="This is the build stage. Here we create the binary." - -WORKDIR /app -COPY . /app -RUN cargo build --locked --$PROFILE --package staking-miner - -# ===== SECOND STAGE ====== - -FROM docker.io/library/ubuntu:20.04 -LABEL description="This is the 2nd stage: a very small image where we copy the binary." 
-LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_builder.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -ARG PROFILE=release -COPY --from=builder /app/target/$PROFILE/staking-miner /usr/local/bin - -RUN useradd -u 1000 -U -s /bin/sh miner && \ - rm -rf /usr/bin /usr/sbin - -# show backtraces -ENV RUST_BACKTRACE 1 - -USER miner - -ENV SEED="" -ENV URI="wss://rpc.polkadot.io" -ENV RUST_LOG="info" - -# check if the binary works in this container -RUN /usr/local/bin/staking-miner --version - -ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/docker/dockerfiles/staking-miner/staking-miner_injected.Dockerfile b/docker/dockerfiles/staking-miner/staking-miner_injected.Dockerfile deleted file mode 100644 index 4901ab4a3736e..0000000000000 --- a/docker/dockerfiles/staking-miner/staking-miner_injected.Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME="staking-miner" - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_injected.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ - useradd -u 1000 -U -s /bin/sh miner - -# add binary to docker image -COPY ./staking-miner /usr/local/bin - -USER miner - -ENV SEED="" -ENV URI="wss://rpc.polkadot.io" -ENV RUST_LOG="info" - -# check if the binary works in this container -RUN /usr/local/bin/staking-miner --version - -ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/docker/scripts/staking-miner/README.md b/docker/scripts/staking-miner/README.md deleted file mode 100644 index 3610e11303167..0000000000000 --- a/docker/scripts/staking-miner/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# staking-miner container image - -## Build using the Builder - -``` -./build.sh -``` - -## Build the injected Image - -You first need a valid Linux binary to inject. Let's assume this binary is located in `BIN_FOLDER`. - -``` -./build-injected.sh "$BIN_FOLDER" -``` - -## Test - -Here is how to test the image. We can generate a valid seed but the staking-miner will quickly notice that our -account is not funded and "does not exist". 
- -You may pass any ENV supported by the binary and must provide at least a few such as `SEED` and `URI`: -``` -ENV SEED="" -ENV URI="wss://rpc.polkadot.io:443" -ENV RUST_LOG="info" -``` - -``` -export SEED=$(subkey generate -n polkadot --output-type json | jq -r .secretSeed) -podman run --rm -it \ - -e URI="wss://rpc.polkadot.io:443" \ - -e RUST_LOG="info" \ - -e SEED \ - localhost/parity/staking-miner \ - dry-run seq-phragmen -``` diff --git a/docker/scripts/staking-miner/build-injected.sh b/docker/scripts/staking-miner/build-injected.sh deleted file mode 100755 index efe323b5fed85..0000000000000 --- a/docker/scripts/staking-miner/build-injected.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -# Sample call: -# $0 /path/to/folder_with_staking-miner_binary -# This script replace the former dedicated staking-miner "injected" Dockerfile -# and shows how to use the generic binary_injected.dockerfile - -PROJECT_ROOT=`git rev-parse --show-toplevel` - -export BINARY=staking-miner -export ARTIFACTS_FOLDER=$1 - -$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/staking-miner/build.sh b/docker/scripts/staking-miner/build.sh deleted file mode 100755 index c2b6ab77e531d..0000000000000 --- a/docker/scripts/staking-miner/build.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -# Sample call: -# $0 /path/to/folder_with_staking-miner_binary -# This script replace the former dedicated staking-miner "injected" Dockerfile -# and shows how to use the generic binary_injected.dockerfile - -PROJECT_ROOT=`git rev-parse --show-toplevel` -ENGINE=podman - -echo "Building the staking-miner using the Builder image" -echo "PROJECT_ROOT=$PROJECT_ROOT" -$ENGINE build -t staking-miner -f "${PROJECT_ROOT}/docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile" "$PROJECT_ROOT" diff --git a/docker/scripts/staking-miner/staking-miner_Dockerfile.README.md b/docker/scripts/staking-miner/staking-miner_Dockerfile.README.md deleted file mode 100644 index ce424c42f479a..0000000000000 --- a/docker/scripts/staking-miner/staking-miner_Dockerfile.README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Staking-miner Docker image - -## [GitHub](https://github.com/paritytech/polkadot/tree/master/utils/staking-miner) diff --git a/docker/scripts/staking-miner/staking-miner_builder.Dockerfile b/docker/scripts/staking-miner/staking-miner_builder.Dockerfile deleted file mode 100644 index 0ae77f36c79d0..0000000000000 --- a/docker/scripts/staking-miner/staking-miner_builder.Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -FROM paritytech/ci-linux:production as builder - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME="staking-miner" -ARG PROFILE=production - -LABEL description="This is the build stage. Here we create the binary." - -WORKDIR /app -COPY . /app -RUN cargo build --locked --profile $PROFILE --package staking-miner - -# ===== SECOND STAGE ====== - -FROM docker.io/parity/base-bin:latest -LABEL description="This is the 2nd stage: a very small image where we copy the binary." 
-LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_builder.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -ARG PROFILE=release -COPY --from=builder /app/target/$PROFILE/staking-miner /usr/local/bin - -# show backtraces -ENV RUST_BACKTRACE 1 - -USER parity - -ENV SEED="" -ENV URI="wss://rpc.polkadot.io" -ENV RUST_LOG="info" - -# check if the binary works in this container -RUN /usr/local/bin/staking-miner --version - -ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/docker/scripts/staking-miner/test-build.sh b/docker/scripts/staking-miner/test-build.sh deleted file mode 100755 index 0ce74e2df296d..0000000000000 --- a/docker/scripts/staking-miner/test-build.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -TMP=$(mktemp -d) -ENGINE=${ENGINE:-podman} - -# You need to build an injected image first - -# Fetch some binaries -$ENGINE run --user root --rm -i \ - -v "$TMP:/export" \ - --entrypoint /bin/bash \ - parity/staking-miner -c \ - 'cp "$(which staking-miner)" /export' - -echo "Checking binaries we got:" -tree $TMP - -./build-injected.sh $TMP diff --git a/polkadot/utils/staking-miner/.gitignore b/polkadot/utils/staking-miner/.gitignore deleted file mode 100644 index db7cff848330b..0000000000000 --- a/polkadot/utils/staking-miner/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.key -*.bin diff --git a/polkadot/utils/staking-miner/Cargo.toml b/polkadot/utils/staking-miner/Cargo.toml deleted file mode 100644 index 4b012e3ac73f7..0000000000000 --- a/polkadot/utils/staking-miner/Cargo.toml +++ /dev/null @@ -1,54 +0,0 @@ -[[bin]] -name = "staging-staking-miner" -path = "src/main.rs" - -[package] -name = "staging-staking-miner" -version = "1.0.0" -authors.workspace = true -edition.workspace = true -license.workspace = true -publish = false - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -clap = { version = "4.4.2", features = ["derive", "env"] } -tracing-subscriber = { version = "0.3.11", features = ["env-filter"] } -jsonrpsee = { version = "0.16.2", features = ["ws-client", "macros"] } -log = "0.4.17" -paste = "1.0.7" -serde = "1.0.188" -serde_json = "1.0" -thiserror = "1.0.48" -tokio = { version = "1.24.2", features = ["macros", "rt-multi-thread", "sync"] } -remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } -signal-hook-tokio = { version = "0.3", features = ["futures-v0_3"] } -sp-core = { path = "../../../substrate/primitives/core" } -sp-version = { path = "../../../substrate/primitives/version" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-npos-elections = { path = "../../../substrate/primitives/npos-elections" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } - -frame-system = { path = "../../../substrate/frame/system" } -frame-support = { path = "../../../substrate/frame/support" } -frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support" } 
-pallet-election-provider-multi-phase = { path = "../../../substrate/frame/election-provider-multi-phase" } -pallet-staking = { path = "../../../substrate/frame/staking" } -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } - -core-primitives = { package = "polkadot-core-primitives", path = "../../core-primitives" } - -runtime-common = { package = "polkadot-runtime-common", path = "../../runtime/common" } -polkadot-runtime = { path = "../../runtime/polkadot" } -kusama-runtime = { package = "staging-kusama-runtime", path = "../../runtime/kusama" } -westend-runtime = { path = "../../runtime/westend" } -exitcode = "1.1" - -sub-tokens = { git = "https://github.com/paritytech/substrate-debug-kit", branch = "master" } -signal-hook = "0.3" -futures-util = "0.3" - -[dev-dependencies] -assert_cmd = "2.0.4" diff --git a/polkadot/utils/staking-miner/README.md b/polkadot/utils/staking-miner/README.md deleted file mode 100644 index 90a00eeac089d..0000000000000 --- a/polkadot/utils/staking-miner/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Staking Miner - -Substrate chains validators compute a basic solution for the NPoS election. The optimization of the solution is -computing-intensive and can be delegated to the `staking-miner`. The `staking-miner` does not act as validator and -focuses solely on the optimization of the solution. - -The staking miner connects to a specified chain and keeps listening to new Signed phase of the -[pallet-election-provider-multi-phase](https://crates.parity.io/pallet_election_provider_multi_phase/index.html) in -order to submit solutions to the NPoS election. When the correct time comes, it computes its solution and submit it to -the chain. The default miner algorithm is -[sequential-phragmen](https://crates.parity.io/sp_npos_elections/phragmen/fn.seq_phragmen_core.html)] with a -configurable number of balancing iterations that improve the score. - -Running the staking-miner requires passing the seed of a funded account in order to pay the fees for the transactions -that will be sent. The same account's balance is used to reserve deposits as well. The best solution in each round is -rewarded. All correct solutions will get their bond back. Any invalid solution will lose their bond. - -You can check the help with: -``` -staking-miner --help -``` - -## Building - -You can build from the root of the Polkadot repository using: -``` -cargo build --profile production --locked --package staking-miner --bin staking-miner -``` - -## Docker - -There are 2 options to build a staking-miner Docker image: -- injected binary: the binary is first built on a Linux host and then injected into a Docker base image. This method - only works if you have a Linux host or access to a pre-built binary from a Linux host. -- multi-stage: the binary is entirely built within the multi-stage Docker image. There is no requirement on the host in - terms of OS and the host does not even need to have any Rust toolchain installed. - -### Building the injected image - -First build the binary as documented [above](#building). 
You may then inject the binary into a Docker base image: -`parity/base-bin` (running the command from the root of the Polkadot repository): -``` -TODO: UPDATE THAT -docker build -t staking-miner -f scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile target/release -``` - -### Building the multi-stage image - -Unlike the injected image that requires a Linux pre-built binary, this option does not requires a Linux host, nor Rust -to be installed. The trade-off however is that it takes a little longer to build and this option is less ideal for CI -tasks. You may build the multi-stage image the root of the Polkadot repository with: -``` -TODO: UPDATE THAT -docker build -t staking-miner -f docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile . -``` - -### Running - -A Docker container, especially one holding one of your `SEED` should be kept as secure as possible. While it won't -prevent a malicious actor to read your `SEED` if they gain access to your container, it is nonetheless recommended -running this container in `read-only` mode: - -``` -# The following line starts with an extra space on purpose: - SEED=0x1234... - -docker run --rm -i \ - --name staking-miner \ - --read-only \ - -e RUST_LOG=info \ - -e SEED=$SEED \ - -e URI=wss://your-node:9944 \ - staking-miner dry-run -``` - -### Test locally - -Make sure you've built Polkadot, then: - -1. `cargo run -p polkadot --features fast-runtime -- --chain polkadot-dev --tmp --alice -lruntime=debug` -2. `cargo run -p staking-miner -- --uri ws://localhost:9944 monitor --seed-or-path //Alice phrag-mms` diff --git a/polkadot/utils/staking-miner/src/dry_run.rs b/polkadot/utils/staking-miner/src/dry_run.rs deleted file mode 100644 index 7e46f630a1f5e..0000000000000 --- a/polkadot/utils/staking-miner/src/dry_run.rs +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The dry-run command. - -use crate::{opts::DryRunConfig, prelude::*, rpc::*, signer::Signer, Error, SharedRpcClient}; -use codec::Encode; -use frame_support::traits::Currency; -use sp_core::Bytes; -use sp_npos_elections::ElectionScore; - -/// Forcefully create the snapshot. This can be used to compute the election at anytime. -fn force_create_snapshot(ext: &mut Ext) -> Result<(), Error> { - ext.execute_with(|| { - if >::exists() { - log::info!(target: LOG_TARGET, "snapshot already exists."); - Ok(()) - } else { - log::info!(target: LOG_TARGET, "creating a fake snapshot now."); - >::create_snapshot().map(|_| ()).map_err(Into::into) - } - }) -} - -/// Helper method to print the encoded size of the snapshot. 
-async fn print_info( - rpc: &SharedRpcClient, - ext: &mut Ext, - raw_solution: &EPM::RawSolution>, - extrinsic: &Bytes, -) where - ::Currency: Currency, -{ - ext.execute_with(|| { - log::info!( - target: LOG_TARGET, - "Snapshot Metadata: {:?}", - >::snapshot_metadata() - ); - log::info!( - target: LOG_TARGET, - "Snapshot Encoded Length: {:?}", - >::snapshot() - .expect("snapshot must exist before calling `measure_snapshot_size`") - .encode() - .len() - ); - - let snapshot_size = - >::snapshot_metadata().expect("snapshot must exist by now; qed."); - let deposit = EPM::Pallet::::deposit_for(raw_solution, snapshot_size); - - let score = { - let ElectionScore { minimal_stake, sum_stake, sum_stake_squared } = raw_solution.score; - [Token::from(minimal_stake), Token::from(sum_stake), Token::from(sum_stake_squared)] - }; - - log::info!( - target: LOG_TARGET, - "solution score {:?} / deposit {:?} / length {:?}", - score, - Token::from(deposit), - raw_solution.encode().len(), - ); - }); - - let info = rpc.payment_query_info(&extrinsic, None).await; - - log::info!( - target: LOG_TARGET, - "payment_queryInfo: (fee = {}) {:?}", - info.as_ref() - .map(|d| Token::from(d.partial_fee)) - .unwrap_or_else(|_| Token::from(0)), - info, - ); -} - -/// Find the stake threshold in order to have at most `count` voters. -#[allow(unused)] -fn find_threshold(ext: &mut Ext, count: usize) { - ext.execute_with(|| { - let mut voters = >::snapshot() - .expect("snapshot must exist before calling `measure_snapshot_size`") - .voters; - voters.sort_by_key(|(_voter, weight, _targets)| std::cmp::Reverse(*weight)); - match voters.get(count) { - Some(threshold_voter) => println!("smallest allowed voter is {:?}", threshold_voter), - None => { - println!("requested truncation to {} voters but had only {}", count, voters.len()); - println!("smallest current voter: {:?}", voters.last()); - }, - } - }) -} - -macro_rules! dry_run_cmd_for { ($runtime:ident) => { paste::paste! { - /// Execute the dry-run command. - pub(crate) async fn []( - rpc: SharedRpcClient, - config: DryRunConfig, - signer: Signer, - ) -> Result<(), Error<$crate::[<$runtime _runtime_exports>]::Runtime>> { - use $crate::[<$runtime _runtime_exports>]::*; - let pallets = if config.force_snapshot { - vec!["Staking".to_string(), "BagsList".to_string()] - } else { - Default::default() - }; - let mut ext = crate::create_election_ext::(rpc.clone(), config.at, pallets).await?; - if config.force_snapshot { - force_create_snapshot::(&mut ext)?; - }; - - log::debug!(target: LOG_TARGET, "solving with {:?}", config.solver); - let raw_solution = crate::mine_with::(&config.solver, &mut ext, false)?; - - let nonce = crate::get_account_info::(&rpc, &signer.account, config.at) - .await? - .map(|i| i.nonce) - .expect("signer account is checked to exist upon startup; it can only die if it \ - transfers funds out of it, or get slashed. If it does not exist at this point, \ - it is likely due to a bug, or the signer got slashed. Terminating." 
- ); - let tip = 0 as Balance; - let era = sp_runtime::generic::Era::Immortal; - let extrinsic = ext.execute_with(|| create_uxt(raw_solution.clone(), signer.clone(), nonce, tip, era)); - - let bytes = sp_core::Bytes(extrinsic.encode().to_vec()); - print_info::(&rpc, &mut ext, &raw_solution, &bytes).await; - - let feasibility_result = ext.execute_with(|| { - EPM::Pallet::::feasibility_check(raw_solution.clone(), EPM::ElectionCompute::Signed) - }); - log::info!(target: LOG_TARGET, "feasibility result is {:?}", feasibility_result.map(|_| ())); - - let dispatch_result = ext.execute_with(|| { - // manually tweak the phase. - EPM::CurrentPhase::::put(EPM::Phase::Signed); - EPM::Pallet::::submit(frame_system::RawOrigin::Signed(signer.account).into(), Box::new(raw_solution)) - }); - log::info!(target: LOG_TARGET, "dispatch result is {:?}", dispatch_result); - - let dry_run_fut = rpc.dry_run(&bytes, None); - let outcome: sp_runtime::ApplyExtrinsicResult = await_request_and_decode(dry_run_fut).await.map_err::, _>(Into::into)?; - log::info!(target: LOG_TARGET, "dry-run outcome is {:?}", outcome); - Ok(()) - } -}}} - -dry_run_cmd_for!(polkadot); -dry_run_cmd_for!(kusama); -dry_run_cmd_for!(westend); diff --git a/polkadot/utils/staking-miner/src/emergency_solution.rs b/polkadot/utils/staking-miner/src/emergency_solution.rs deleted file mode 100644 index 9ea9f90756e22..0000000000000 --- a/polkadot/utils/staking-miner/src/emergency_solution.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The emergency-solution command. - -use crate::{prelude::*, EmergencySolutionConfig, Error, SharedRpcClient}; -use codec::Encode; -use std::io::Write; - -macro_rules! emergency_solution_cmd_for { ($runtime:ident) => { paste::paste! { - /// Execute the emergency-solution command. - pub(crate) async fn []( - client: SharedRpcClient, - config: EmergencySolutionConfig, - ) -> Result<(), Error<$crate::[<$runtime _runtime_exports>]::Runtime>> { - use $crate::[<$runtime _runtime_exports>]::*; - - let mut ext = crate::create_election_ext::(client, config.at, vec![]).await?; - let raw_solution = crate::mine_with::(&config.solver, &mut ext, false)?; - - ext.execute_with(|| { - assert!(EPM::Pallet::::current_phase().is_emergency()); - - log::info!(target: LOG_TARGET, "mined solution with {:?}", &raw_solution.score); - - let ready_solution = EPM::Pallet::::feasibility_check(raw_solution, EPM::ElectionCompute::Signed)?; - let encoded_size = ready_solution.encoded_size(); - let score = ready_solution.score; - let mut supports = ready_solution.supports.into_inner(); - // maybe truncate. - if let Some(take) = config.take { - log::info!(target: LOG_TARGET, "truncating {} winners to {}", supports.len(), take); - supports.sort_unstable_by_key(|(_, s)| s.total); - supports.truncate(take); - } - - // write to file and stdout. 
- let encoded_support = supports.encode(); - let mut supports_file = std::fs::File::create("solution.supports.bin")?; - supports_file.write_all(&encoded_support)?; - - log::info!(target: LOG_TARGET, "ReadySolution: size {:?} / score = {:?}", encoded_size, score); - log::trace!(target: LOG_TARGET, "Supports: {}", sp_core::hexdisplay::HexDisplay::from(&encoded_support)); - - Ok(()) - }) - } -}}} - -emergency_solution_cmd_for!(polkadot); -emergency_solution_cmd_for!(kusama); -emergency_solution_cmd_for!(westend); diff --git a/polkadot/utils/staking-miner/src/main.rs b/polkadot/utils/staking-miner/src/main.rs deleted file mode 100644 index 90b2c7366a1ba..0000000000000 --- a/polkadot/utils/staking-miner/src/main.rs +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! # Polkadot Staking Miner. -//! -//! Simple bot capable of monitoring a polkadot (and cousins) chain and submitting solutions to the -//! `pallet-election-provider-multi-phase`. See `--help` for more details. -//! -//! # Implementation Notes: -//! -//! - First draft: Be aware that this is the first draft and there might be bugs, or undefined -//! behaviors. Don't attach this bot to an account with lots of funds. -//! - Quick to crash: The bot is written so that it only continues to work if everything goes well. -//! In case of any failure (RPC, logic, IO), it will crash. This was a decision to simplify the -//! development. It is intended to run this bot with a `restart = true` way, so that it reports it -//! crash, but resumes work thereafter. - -// Silence erroneous warning about unsafe not being required whereas it is -// see https://github.com/rust-lang/rust/issues/49112 -#![allow(unused_unsafe)] - -mod dry_run; -mod emergency_solution; -mod monitor; -mod opts; -mod prelude; -mod rpc; -mod runtime_versions; -mod signer; - -pub(crate) use prelude::*; -pub(crate) use signer::get_account_info; - -use crate::opts::*; -use clap::Parser; -use frame_election_provider_support::NposSolver; -use frame_support::traits::Get; -use futures_util::StreamExt; -use jsonrpsee::ws_client::{WsClient, WsClientBuilder}; -use remote_externalities::{Builder, Mode, OnlineConfig, Transport}; -use rpc::{RpcApiClient, SharedRpcClient}; -use runtime_versions::RuntimeVersions; -use signal_hook::consts::signal::*; -use signal_hook_tokio::Signals; -use sp_npos_elections::BalancingConfig; -use std::{ops::Deref, sync::Arc, time::Duration}; -use tracing_subscriber::{fmt, EnvFilter}; - -pub(crate) enum AnyRuntime { - Polkadot, - Kusama, - Westend, -} - -pub(crate) static mut RUNTIME: AnyRuntime = AnyRuntime::Polkadot; - -macro_rules! construct_runtime_prelude { - ($runtime:ident) => { paste::paste! 
{ - pub(crate) mod [<$runtime _runtime_exports>] { - pub(crate) use crate::prelude::EPM; - pub(crate) use [<$runtime _runtime>]::*; - pub(crate) use crate::monitor::[] as monitor_cmd; - pub(crate) use crate::dry_run::[] as dry_run_cmd; - pub(crate) use crate::emergency_solution::[] as emergency_solution_cmd; - pub(crate) use private::{[] as create_uxt}; - - mod private { - use super::*; - pub(crate) fn []( - raw_solution: EPM::RawSolution>, - signer: crate::signer::Signer, - nonce: crate::prelude::Nonce, - tip: crate::prelude::Balance, - era: sp_runtime::generic::Era, - ) -> UncheckedExtrinsic { - use codec::Encode as _; - use sp_core::Pair as _; - use sp_runtime::traits::StaticLookup as _; - - let crate::signer::Signer { account, pair, .. } = signer; - - let local_call = EPMCall::::submit { raw_solution: Box::new(raw_solution) }; - let call: RuntimeCall = as std::convert::TryInto>::try_into(local_call) - .expect("election provider pallet must exist in the runtime, thus \ - inner call can be converted, qed." - ); - - let extra: SignedExtra = crate::[](nonce, tip, era); - let raw_payload = SignedPayload::new(call, extra).expect("creating signed payload infallible; qed."); - let signature = raw_payload.using_encoded(|payload| { - pair.sign(payload) - }); - let (call, extra, _) = raw_payload.deconstruct(); - let address = ::Lookup::unlookup(account); - let extrinsic = UncheckedExtrinsic::new_signed(call, address, signature.into(), extra); - log::debug!( - target: crate::LOG_TARGET, "constructed extrinsic {} with length {}", - sp_core::hexdisplay::HexDisplay::from(&extrinsic.encode()), - extrinsic.encode().len(), - ); - extrinsic - } - } - }} - }; -} - -// NOTE: we might be able to use some code from the bridges repo here. -fn signed_ext_builder_polkadot( - nonce: Nonce, - tip: Balance, - era: sp_runtime::generic::Era, -) -> polkadot_runtime_exports::SignedExtra { - use polkadot_runtime_exports::Runtime; - ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckMortality::::from(era), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - runtime_common::claims::PrevalidateAttests::::new(), - ) -} - -fn signed_ext_builder_kusama( - nonce: Nonce, - tip: Balance, - era: sp_runtime::generic::Era, -) -> kusama_runtime_exports::SignedExtra { - use kusama_runtime_exports::Runtime; - ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckMortality::::from(era), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ) -} - -fn signed_ext_builder_westend( - nonce: Nonce, - tip: Balance, - era: sp_runtime::generic::Era, -) -> westend_runtime_exports::SignedExtra { - use westend_runtime_exports::Runtime; - ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckMortality::::from(era), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ) -} - -construct_runtime_prelude!(polkadot); -construct_runtime_prelude!(kusama); 
-construct_runtime_prelude!(westend); - -// NOTE: this is no longer used extensively, most of the per-runtime stuff us delegated to -// `construct_runtime_prelude` and macro's the import directly from it. A part of the code is also -// still generic over `T`. My hope is to still make everything generic over a `Runtime`, but sadly -// that is not currently possible as each runtime has its unique `Call`, and all Calls are not -// sharing any generic trait. In other words, to create the `UncheckedExtrinsic` of each chain, you -// need the concrete `Call` of that chain as well. -#[macro_export] -macro_rules! any_runtime { - ($($code:tt)*) => { - unsafe { - match $crate::RUNTIME { - $crate::AnyRuntime::Polkadot => { - #[allow(unused)] - use $crate::polkadot_runtime_exports::*; - $($code)* - }, - $crate::AnyRuntime::Kusama => { - #[allow(unused)] - use $crate::kusama_runtime_exports::*; - $($code)* - }, - $crate::AnyRuntime::Westend => { - #[allow(unused)] - use $crate::westend_runtime_exports::*; - $($code)* - } - } - } - } -} - -/// Same as [`any_runtime`], but instead of returning a `Result`, this simply returns `()`. Useful -/// for situations where the result is not useful and un-ergonomic to handle. -#[macro_export] -macro_rules! any_runtime_unit { - ($($code:tt)*) => { - unsafe { - match $crate::RUNTIME { - $crate::AnyRuntime::Polkadot => { - #[allow(unused)] - use $crate::polkadot_runtime_exports::*; - let _ = $($code)*; - }, - $crate::AnyRuntime::Kusama => { - #[allow(unused)] - use $crate::kusama_runtime_exports::*; - let _ = $($code)*; - }, - $crate::AnyRuntime::Westend => { - #[allow(unused)] - use $crate::westend_runtime_exports::*; - let _ = $($code)*; - } - } - } - } -} - -#[derive(frame_support::DebugNoBound, thiserror::Error)] -enum Error { - Io(#[from] std::io::Error), - JsonRpsee(#[from] jsonrpsee::core::Error), - RpcHelperError(#[from] rpc::RpcHelperError), - Codec(#[from] codec::Error), - Crypto(sp_core::crypto::SecretStringError), - RemoteExternalities(&'static str), - PalletMiner(EPM::unsigned::MinerError), - PalletElection(EPM::ElectionError), - PalletFeasibility(EPM::FeasibilityError), - AccountDoesNotExists, - IncorrectPhase, - AlreadySubmitted, - VersionMismatch, - StrategyNotSatisfied, - Other(String), -} - -impl From for Error { - fn from(e: sp_core::crypto::SecretStringError) -> Error { - Error::Crypto(e) - } -} - -impl From for Error { - fn from(e: EPM::unsigned::MinerError) -> Error { - Error::PalletMiner(e) - } -} - -impl From> for Error { - fn from(e: EPM::ElectionError) -> Error { - Error::PalletElection(e) - } -} - -impl From for Error { - fn from(e: EPM::FeasibilityError) -> Error { - Error::PalletFeasibility(e) - } -} - -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - as std::fmt::Debug>::fmt(self, f) - } -} - -frame_support::parameter_types! { - /// Number of balancing iterations for a solution algorithm. Set based on the [`Solvers`] CLI - /// config. - pub static BalanceIterations: usize = 10; - pub static Balancing: Option = Some( BalancingConfig { iterations: BalanceIterations::get(), tolerance: 0 } ); -} - -/// Build the Ext at hash with all the data of `ElectionProviderMultiPhase` and any additional -/// pallets. 
-async fn create_election_ext( - client: SharedRpcClient, - at: Option, - additional: Vec, -) -> Result> -where - T: EPM::Config, -{ - use frame_support::{storage::generator::StorageMap, traits::PalletInfo}; - use sp_core::hashing::twox_128; - - let mut pallets = vec![::PalletInfo::name::>() - .expect("Pallet always has name; qed.") - .to_string()]; - pallets.extend(additional); - Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: Transport::Uri(client.uri().to_owned()), - at, - pallets, - hashed_prefixes: vec![>::prefix_hash()], - hashed_keys: vec![[twox_128(b"System"), twox_128(b"Number")].concat()], - ..Default::default() - })) - .build() - .await - .map_err(|why| Error::::RemoteExternalities(why)) - .map(|rx| rx.inner_ext) -} - -/// Compute the election. It expects to NOT be `Phase::Off`. In other words, the snapshot must -/// exists on the given externalities. -fn mine_solution( - ext: &mut Ext, - do_feasibility: bool, -) -> Result>, Error> -where - T: EPM::Config, - S: NposSolver< - Error = <::Solver as NposSolver>::Error, - AccountId = <::Solver as NposSolver>::AccountId, - >, -{ - ext.execute_with(|| { - let (solution, _) = >::mine_solution().map_err::, _>(Into::into)?; - if do_feasibility { - let _ = >::feasibility_check( - solution.clone(), - EPM::ElectionCompute::Signed, - )?; - } - Ok(solution) - }) -} - -/// Mine a solution with the given `solver`. -fn mine_with( - solver: &Solver, - ext: &mut Ext, - do_feasibility: bool, -) -> Result>, Error> -where - T: EPM::Config, - T::Solver: NposSolver, -{ - use frame_election_provider_support::{PhragMMS, SequentialPhragmen}; - - match solver { - Solver::SeqPhragmen { iterations } => { - BalanceIterations::set(*iterations); - mine_solution::< - T, - SequentialPhragmen< - ::AccountId, - sp_runtime::Perbill, - Balancing, - >, - >(ext, do_feasibility) - }, - Solver::PhragMMS { iterations } => { - BalanceIterations::set(*iterations); - mine_solution::< - T, - PhragMMS<::AccountId, sp_runtime::Perbill, Balancing>, - >(ext, do_feasibility) - }, - } -} - -#[allow(unused)] -fn mine_dpos(ext: &mut Ext) -> Result<(), Error> { - ext.execute_with(|| { - use std::collections::BTreeMap; - use EPM::RoundSnapshot; - let RoundSnapshot { voters, .. 
} = EPM::Snapshot::::get().unwrap(); - let desired_targets = EPM::DesiredTargets::::get().unwrap(); - let mut candidates_and_backing = BTreeMap::::new(); - voters.into_iter().for_each(|(who, stake, targets)| { - if targets.is_empty() { - println!("target = {:?}", (who, stake, targets)); - return - } - let share: u128 = (stake as u128) / (targets.len() as u128); - for target in targets { - *candidates_and_backing.entry(target.clone()).or_default() += share - } - }); - - let mut candidates_and_backing = - candidates_and_backing.into_iter().collect::>(); - candidates_and_backing.sort_by_key(|(_, total_stake)| *total_stake); - let winners = candidates_and_backing - .into_iter() - .rev() - .take(desired_targets as usize) - .collect::>(); - let score = { - let min_staker = *winners.last().map(|(_, stake)| stake).unwrap(); - let sum_stake = winners.iter().fold(0u128, |acc, (_, stake)| acc + stake); - let sum_squared = winners.iter().fold(0u128, |acc, (_, stake)| acc + stake); - [min_staker, sum_stake, sum_squared] - }; - println!("mined a dpos-like solution with score = {:?}", score); - Ok(()) - }) -} - -pub(crate) async fn check_versions( - rpc: &SharedRpcClient, - print: bool, -) -> Result<(), Error> { - let linked_version = T::Version::get(); - let on_chain_version = rpc - .runtime_version(None) - .await - .expect("runtime version RPC should always work; qed"); - - let do_print = || { - log::info!( - target: LOG_TARGET, - "linked version {:?}", - (&linked_version.spec_name, &linked_version.spec_version) - ); - log::info!( - target: LOG_TARGET, - "on-chain version {:?}", - (&on_chain_version.spec_name, &on_chain_version.spec_version) - ); - }; - - if print { - do_print(); - } - - // we relax the checking here a bit, which should not cause any issues in production (a chain - // that messes up its spec name is highly unlikely), but it allows us to do easier testing. - if linked_version.spec_name != on_chain_version.spec_name || - linked_version.spec_version != on_chain_version.spec_version - { - if !print { - do_print(); - } - log::error!( - target: LOG_TARGET, - "VERSION MISMATCH: any transaction will fail with bad-proof" - ); - Err(Error::VersionMismatch) - } else { - Ok(()) - } -} - -/// Control how we exit the application -fn controlled_exit(code: i32) { - log::info!(target: LOG_TARGET, "Exiting application"); - std::process::exit(code); -} - -/// Handles the various signal and exit the application -/// when appropriate. 
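The version check above is deliberately relaxed: only `spec_name` and `spec_version` are compared, so a differing `impl_version` or `transaction_version` alone will not abort. A minimal restatement of the predicate, assuming only the `sp_version::RuntimeVersion` fields used above:

```rust
fn versions_match(linked: &sp_version::RuntimeVersion, on_chain: &sp_version::RuntimeVersion) -> bool {
    // Anything beyond the spec name/version (e.g. `impl_version`) is ignored.
    linked.spec_name == on_chain.spec_name && linked.spec_version == on_chain.spec_version
}
```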
-async fn handle_signals(mut signals: Signals) { - let mut keyboard_sig_count: u8 = 0; - while let Some(signal) = signals.next().await { - match signal { - // Interrupts come from the keyboard - SIGQUIT | SIGINT => { - if keyboard_sig_count >= 1 { - log::info!( - target: LOG_TARGET, - "Received keyboard termination signal #{}/{}, quitting...", - keyboard_sig_count + 1, - 2 - ); - controlled_exit(exitcode::OK); - } - keyboard_sig_count += 1; - log::warn!( - target: LOG_TARGET, - "Received keyboard termination signal #{}, if you keep doing that I will really quit", - keyboard_sig_count - ); - }, - - SIGKILL | SIGTERM => { - log::info!(target: LOG_TARGET, "Received SIGKILL | SIGTERM, quitting..."); - controlled_exit(exitcode::OK); - }, - _ => unreachable!(), - } - } -} - -#[tokio::main] -async fn main() { - fmt().with_env_filter(EnvFilter::from_default_env()).init(); - - let Opt { uri, command, connection_timeout, request_timeout } = Opt::parse(); - log::debug!(target: LOG_TARGET, "attempting to connect to {:?}", uri); - - let signals = Signals::new(&[SIGTERM, SIGINT, SIGQUIT]).expect("Failed initializing Signals"); - let handle = signals.handle(); - let signals_task = tokio::spawn(handle_signals(signals)); - - let rpc = loop { - match SharedRpcClient::new( - &uri, - Duration::from_secs(connection_timeout as u64), - Duration::from_secs(request_timeout as u64), - ) - .await - { - Ok(client) => break client, - Err(why) => { - log::warn!( - target: LOG_TARGET, - "failed to connect to client due to {:?}, retrying soon..", - why - ); - tokio::time::sleep(std::time::Duration::from_millis(2500)).await; - }, - } - }; - - let chain: String = rpc.system_chain().await.expect("system_chain infallible; qed."); - match chain.to_lowercase().as_str() { - "polkadot" | "development" => { - sp_core::crypto::set_default_ss58_version( - sp_core::crypto::Ss58AddressFormatRegistry::PolkadotAccount.into(), - ); - sub_tokens::dynamic::set_name("DOT"); - sub_tokens::dynamic::set_decimal_points(10_000_000_000); - // safety: this program will always be single threaded, thus accessing global static is - // safe. - unsafe { - RUNTIME = AnyRuntime::Polkadot; - } - }, - "kusama" | "kusama-dev" => { - sp_core::crypto::set_default_ss58_version( - sp_core::crypto::Ss58AddressFormatRegistry::KusamaAccount.into(), - ); - sub_tokens::dynamic::set_name("KSM"); - sub_tokens::dynamic::set_decimal_points(1_000_000_000_000); - // safety: this program will always be single threaded, thus accessing global static is - // safe. - unsafe { - RUNTIME = AnyRuntime::Kusama; - } - }, - "westend" => { - sp_core::crypto::set_default_ss58_version( - sp_core::crypto::Ss58AddressFormatRegistry::PolkadotAccount.into(), - ); - sub_tokens::dynamic::set_name("WND"); - sub_tokens::dynamic::set_decimal_points(1_000_000_000_000); - // safety: this program will always be single threaded, thus accessing global static is - // safe. - unsafe { - RUNTIME = AnyRuntime::Westend; - } - }, - _ => { - eprintln!("unexpected chain: {:?}", chain); - return - }, - } - log::info!(target: LOG_TARGET, "connected to chain {:?}", chain); - - any_runtime_unit! { - check_versions::(&rpc, true).await - }; - - let outcome = any_runtime! { - match command { - Command::Monitor(monitor_config) => - { - let signer_account = any_runtime! 
{ - signer::signer_uri_from_string::(&monitor_config.seed_or_path , &rpc) - .await - .expect("Provided account is invalid, terminating.") - }; - monitor_cmd(rpc, monitor_config, signer_account).await - .map_err(|e| { - log::error!(target: LOG_TARGET, "Monitor error: {:?}", e); - })}, - Command::DryRun(dryrun_config) => { - let signer_account = any_runtime! { - signer::signer_uri_from_string::(&dryrun_config.seed_or_path , &rpc) - .await - .expect("Provided account is invalid, terminating.") - }; - dry_run_cmd(rpc, dryrun_config, signer_account).await - .map_err(|e| { - log::error!(target: LOG_TARGET, "DryRun error: {:?}", e); - })}, - Command::EmergencySolution(emergency_solution_config) => - emergency_solution_cmd(rpc, emergency_solution_config).await - .map_err(|e| { - log::error!(target: LOG_TARGET, "EmergencySolution error: {:?}", e); - }), - Command::Info(info_opts) => { - let remote_runtime_version = rpc.runtime_version(None).await.expect("runtime_version infallible; qed."); - - let builtin_version = any_runtime! { - Version::get() - }; - - let versions = RuntimeVersions::new(&remote_runtime_version, &builtin_version); - - if !info_opts.json { - println!("{}", versions); - } else { - let versions = serde_json::to_string_pretty(&versions).expect("Failed serializing version info"); - println!("{}", versions); - } - Ok(()) - } - } - }; - log::info!(target: LOG_TARGET, "round of execution finished. outcome = {:?}", outcome); - - handle.close(); - let _ = signals_task.await; -} - -#[cfg(test)] -mod tests { - use super::*; - - fn get_version() -> sp_version::RuntimeVersion { - T::Version::get() - } - - #[test] - fn any_runtime_works() { - unsafe { - RUNTIME = AnyRuntime::Polkadot; - } - let polkadot_version = any_runtime! { get_version::() }; - - unsafe { - RUNTIME = AnyRuntime::Kusama; - } - let kusama_version = any_runtime! { get_version::() }; - - assert_eq!(polkadot_version.spec_name, "polkadot".into()); - assert_eq!(kusama_version.spec_name, "kusama".into()); - } -} diff --git a/polkadot/utils/staking-miner/src/monitor.rs b/polkadot/utils/staking-miner/src/monitor.rs deleted file mode 100644 index 607ecb6baa42c..0000000000000 --- a/polkadot/utils/staking-miner/src/monitor.rs +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The monitor command. - -use crate::{ - prelude::*, rpc::*, signer::Signer, Error, MonitorConfig, SharedRpcClient, SubmissionStrategy, -}; -use codec::Encode; -use jsonrpsee::core::Error as RpcError; -use sc_transaction_pool_api::TransactionStatus; -use sp_core::storage::StorageKey; -use sp_runtime::Perbill; -use std::sync::Arc; -use tokio::sync::{mpsc, Mutex}; -use EPM::{signed::SubmissionIndicesOf, SignedSubmissionOf}; - -/// Ensure that now is the signed phase. 
-async fn ensure_signed_phase>( - rpc: &SharedRpcClient, - at: B::Hash, -) -> Result<(), Error> { - let key = StorageKey(EPM::CurrentPhase::::hashed_key().to_vec()); - let phase = rpc - .get_storage_and_decode::>(&key, Some(at)) - .await - .map_err::, _>(Into::into)? - .unwrap_or_default(); - - if phase.is_signed() { - Ok(()) - } else { - Err(Error::IncorrectPhase) - } -} - -/// Ensure that our current `us` have not submitted anything previously. -async fn ensure_no_previous_solution( - rpc: &SharedRpcClient, - at: Hash, - us: &AccountId, -) -> Result<(), Error> -where - T: EPM::Config + frame_system::Config, - B: BlockT, -{ - let indices_key = StorageKey(EPM::SignedSubmissionIndices::::hashed_key().to_vec()); - - let indices: SubmissionIndicesOf = rpc - .get_storage_and_decode(&indices_key, Some(at)) - .await - .map_err::, _>(Into::into)? - .unwrap_or_default(); - - for (_score, _bn, idx) in indices { - let key = StorageKey(EPM::SignedSubmissionsMap::::hashed_key_for(idx)); - - if let Some(submission) = rpc - .get_storage_and_decode::>(&key, Some(at)) - .await - .map_err::, _>(Into::into)? - { - if &submission.who == us { - return Err(Error::AlreadySubmitted) - } - } - } - - Ok(()) -} - -/// `true` if `our_score` should pass the onchain `best_score` with the given strategy. -pub(crate) fn score_passes_strategy( - our_score: sp_npos_elections::ElectionScore, - best_score: sp_npos_elections::ElectionScore, - strategy: SubmissionStrategy, -) -> bool { - match strategy { - SubmissionStrategy::Always => true, - SubmissionStrategy::IfLeading => - our_score == best_score || - our_score.strict_threshold_better(best_score, Perbill::zero()), - SubmissionStrategy::ClaimBetterThan(epsilon) => - our_score.strict_threshold_better(best_score, epsilon), - SubmissionStrategy::ClaimNoWorseThan(epsilon) => - !best_score.strict_threshold_better(our_score, epsilon), - } -} - -/// Reads all current solutions and checks the scores according to the `SubmissionStrategy`. -async fn ensure_strategy_met( - rpc: &SharedRpcClient, - at: Hash, - score: sp_npos_elections::ElectionScore, - strategy: SubmissionStrategy, - max_submissions: u32, -) -> Result<(), Error> { - // don't care about current scores. - if matches!(strategy, SubmissionStrategy::Always) { - return Ok(()) - } - - let indices_key = StorageKey(EPM::SignedSubmissionIndices::::hashed_key().to_vec()); - - let indices: SubmissionIndicesOf = rpc - .get_storage_and_decode(&indices_key, Some(at)) - .await - .map_err::, _>(Into::into)? - .unwrap_or_default(); - - if indices.len() >= max_submissions as usize { - log::debug!(target: LOG_TARGET, "The submissions queue is full"); - } - - // default score is all zeros, any score is better than it. - let best_score = indices.last().map(|(score, _, _)| *score).unwrap_or_default(); - log::debug!(target: LOG_TARGET, "best onchain score is {:?}", best_score); - - if score_passes_strategy(score, best_score, strategy) { - Ok(()) - } else { - Err(Error::StrategyNotSatisfied) - } -} - -async fn get_latest_head( - rpc: &SharedRpcClient, - mode: &str, -) -> Result> { - if mode == "head" { - match rpc.block_hash(None).await { - Ok(Some(hash)) => Ok(hash), - Ok(None) => Err(Error::Other("Best head not found".into())), - Err(e) => Err(e.into()), - } - } else { - rpc.finalized_head().await.map_err(Into::into) - } -} - -macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! { - - /// The monitor command. 
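To make the threshold semantics of `score_passes_strategy` above concrete, a hedged sketch reproducing four cases from the tests at the end of this file (scores built the same way, varying only `minimal_stake`):

```rust
use sp_npos_elections::ElectionScore;
use sp_runtime::Perbill;

// Helper as in the tests below: a score that only sets `minimal_stake`.
fn s(x: u128) -> ElectionScore {
    ElectionScore { minimal_stake: x, ..Default::default() }
}

let two = Perbill::from_percent(2);
// `ClaimBetterThan(2%)` demands a strictly-more-than-2% improvement...
assert!(score_passes_strategy(s(103), s(100), SubmissionStrategy::ClaimBetterThan(two)));
assert!(!score_passes_strategy(s(102), s(100), SubmissionStrategy::ClaimBetterThan(two)));
// ...while `ClaimNoWorseThan(2%)` accepts anything not more than 2% worse.
assert!(score_passes_strategy(s(98), s(100), SubmissionStrategy::ClaimNoWorseThan(two)));
assert!(!score_passes_strategy(s(97), s(100), SubmissionStrategy::ClaimNoWorseThan(two)));
```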
- pub(crate) async fn []( - rpc: SharedRpcClient, - config: MonitorConfig, - signer: Signer, - ) -> Result<(), Error<$crate::[<$runtime _runtime_exports>]::Runtime>> { - use $crate::[<$runtime _runtime_exports>]::*; - type StakingMinerError = Error<$crate::[<$runtime _runtime_exports>]::Runtime>; - - let heads_subscription = || - if config.listen == "head" { - rpc.subscribe_new_heads() - } else { - rpc.subscribe_finalized_heads() - }; - - let mut subscription = heads_subscription().await?; - let (tx, mut rx) = mpsc::unbounded_channel::(); - let submit_lock = Arc::new(Mutex::new(())); - - loop { - let at = tokio::select! { - maybe_rp = subscription.next() => { - match maybe_rp { - Some(Ok(r)) => r, - Some(Err(e)) => { - log::error!(target: LOG_TARGET, "subscription failed to decode Header {:?}, this is bug please file an issue", e); - return Err(e.into()); - } - // The subscription was dropped, should only happen if: - // - the connection was closed. - // - the subscription could not keep up with the server. - None => { - log::warn!(target: LOG_TARGET, "subscription to `subscribeNewHeads/subscribeFinalizedHeads` terminated. Retrying.."); - subscription = heads_subscription().await?; - continue - } - } - }, - maybe_err = rx.recv() => { - match maybe_err { - Some(err) => return Err(err), - None => unreachable!("at least one sender kept in the main loop should always return Some; qed"), - } - } - }; - - // Spawn task and non-recoverable errors are sent back to the main task - // such as if the connection has been closed. - tokio::spawn( - send_and_watch_extrinsic(rpc.clone(), tx.clone(), at, signer.clone(), config.clone(), submit_lock.clone()) - ); - } - - /// Construct extrinsic at given block and watch it. - async fn send_and_watch_extrinsic( - rpc: SharedRpcClient, - tx: mpsc::UnboundedSender, - at: Header, - signer: Signer, - config: MonitorConfig, - submit_lock: Arc>, - ) { - - async fn flatten( - handle: tokio::task::JoinHandle> - ) -> Result { - match handle.await { - Ok(Ok(result)) => Ok(result), - Ok(Err(err)) => Err(err), - Err(err) => panic!("tokio spawn task failed; kill task: {:?}", err), - } - } - - let hash = at.hash(); - log::trace!(target: LOG_TARGET, "new event at #{:?} ({:?})", at.number, hash); - - // block on this because if this fails there is no way to recover from - // that error i.e, upgrade/downgrade required. - if let Err(err) = crate::check_versions::(&rpc, false).await { - let _ = tx.send(err.into()); - return; - } - - let rpc1 = rpc.clone(); - let rpc2 = rpc.clone(); - let account = signer.account.clone(); - - let signed_phase_fut = tokio::spawn(async move { - ensure_signed_phase::(&rpc1, hash).await - }); - - tokio::time::sleep(std::time::Duration::from_secs(config.delay as u64)).await; - - let no_prev_sol_fut = tokio::spawn(async move { - ensure_no_previous_solution::(&rpc2, hash, &account).await - }); - - // Run the calls in parallel and return once all has completed or any failed. - if let Err(err) = tokio::try_join!(flatten(signed_phase_fut), flatten(no_prev_sol_fut)) { - log::debug!(target: LOG_TARGET, "Skipping block {}; {}", at.number, err); - return; - } - - let _lock = submit_lock.lock().await; - - let mut ext = match crate::create_election_ext::(rpc.clone(), Some(hash), vec![]).await { - Ok(ext) => ext, - Err(err) => { - log::debug!(target: LOG_TARGET, "Skipping block {}; {}", at.number, err); - return; - } - }; - - // mine a solution, and run feasibility check on it as well. 
- let raw_solution = match crate::mine_with::(&config.solver, &mut ext, true) { - Ok(r) => r, - Err(err) => { - let _ = tx.send(err.into()); - return; - } - }; - - let score = raw_solution.score; - log::info!(target: LOG_TARGET, "mined solution with {:?}", score); - - let nonce = match crate::get_account_info::(&rpc, &signer.account, Some(hash)).await { - Ok(maybe_account) => { - let acc = maybe_account.expect(crate::signer::SIGNER_ACCOUNT_WILL_EXIST); - acc.nonce - } - Err(err) => { - let _ = tx.send(err); - return; - } - }; - - let tip = 0 as Balance; - let period = ::BlockHashCount::get() / 2; - let current_block = at.number.saturating_sub(1); - let era = sp_runtime::generic::Era::mortal(period.into(), current_block.into()); - - log::trace!( - target: LOG_TARGET, "transaction mortality: {:?} -> {:?}", - era.birth(current_block.into()), - era.death(current_block.into()), - ); - - let extrinsic = ext.execute_with(|| create_uxt(raw_solution, signer.clone(), nonce, tip, era)); - let bytes = sp_core::Bytes(extrinsic.encode()); - - let rpc1 = rpc.clone(); - let rpc2 = rpc.clone(); - let rpc3 = rpc.clone(); - - let latest_head = match get_latest_head::(&rpc, &config.listen).await { - Ok(hash) => hash, - Err(e) => { - log::debug!(target: LOG_TARGET, "Skipping to submit at block {}; {}", at.number, e); - return; - } - }; - - let ensure_strategy_met_fut = tokio::spawn(async move { - ensure_strategy_met::( - &rpc1, - latest_head, - score, - config.submission_strategy, - SignedMaxSubmissions::get() - ).await - }); - - let ensure_signed_phase_fut = tokio::spawn(async move { - ensure_signed_phase::(&rpc2, latest_head).await - }); - - let account = signer.account.clone(); - let no_prev_sol_fut = tokio::spawn(async move { - ensure_no_previous_solution::(&rpc3, latest_head, &account).await - }); - - // Run the calls in parallel and return once all has completed or any failed. - if let Err(err) = tokio::try_join!( - flatten(ensure_strategy_met_fut), - flatten(ensure_signed_phase_fut), - flatten(no_prev_sol_fut), - ) { - log::debug!(target: LOG_TARGET, "Skipping to submit at block {}; {}", at.number, err); - return; - } - - let mut tx_subscription = match rpc.watch_extrinsic(&bytes).await { - Ok(sub) => sub, - Err(RpcError::RestartNeeded(e)) => { - let _ = tx.send(RpcError::RestartNeeded(e).into()); - return - }, - Err(why) => { - // This usually happens when we've been busy with mining for a few blocks, and - // now we're receiving the subscriptions of blocks in which we were busy. In - // these blocks, we still don't have a solution, so we re-compute a new solution - // and submit it with an outdated `Nonce`, which yields most often `Stale` - // error. NOTE: to improve this overall, and to be able to introduce an array of - // other fancy features, we should make this multi-threaded and do the - // computation outside of this callback. - log::warn!( - target: LOG_TARGET, - "failing to submit a transaction {:?}. 
ignore block: {}", - why, at.number - ); - return; - }, - }; - - while let Some(rp) = tx_subscription.next().await { - let status_update = match rp { - Ok(r) => r, - Err(e) => { - log::error!(target: LOG_TARGET, "subscription failed to decode TransactionStatus {:?}, this is a bug please file an issue", e); - let _ = tx.send(e.into()); - return; - }, - }; - - log::trace!(target: LOG_TARGET, "status update {:?}", status_update); - match status_update { - TransactionStatus::Ready | - TransactionStatus::Broadcast(_) | - TransactionStatus::Future => continue, - TransactionStatus::InBlock((hash, _)) => { - log::info!(target: LOG_TARGET, "included at {:?}", hash); - let key = StorageKey( - frame_support::storage::storage_prefix(b"System", b"Events").to_vec(), - ); - - let events = match rpc.get_storage_and_decode::< - Vec::Hash>>, - >(&key, Some(hash)) - .await { - Ok(rp) => rp.unwrap_or_default(), - Err(RpcHelperError::JsonRpsee(RpcError::RestartNeeded(e))) => { - let _ = tx.send(RpcError::RestartNeeded(e).into()); - return; - } - // Decoding or other RPC error => just terminate the task. - Err(e) => { - log::warn!(target: LOG_TARGET, "get_storage [key: {:?}, hash: {:?}] failed: {:?}; skip block: {}", - key, hash, e, at.number - ); - return; - } - }; - - log::info!(target: LOG_TARGET, "events at inclusion {:?}", events); - }, - TransactionStatus::Retracted(hash) => { - log::info!(target: LOG_TARGET, "Retracted at {:?}", hash); - }, - TransactionStatus::Finalized((hash, _)) => { - log::info!(target: LOG_TARGET, "Finalized at {:?}", hash); - break - }, - _ => { - log::warn!( - target: LOG_TARGET, - "Stopping listen due to other status {:?}", - status_update - ); - break - }, - }; - } - } - } -}}} - -monitor_cmd_for!(polkadot); -monitor_cmd_for!(kusama); -monitor_cmd_for!(westend); - -#[cfg(test)] -pub mod tests { - use super::*; - - #[test] - fn score_passes_strategy_works() { - let s = |x| sp_npos_elections::ElectionScore { minimal_stake: x, ..Default::default() }; - let two = Perbill::from_percent(2); - - // anything passes Always - assert!(score_passes_strategy(s(0), s(0), SubmissionStrategy::Always)); - assert!(score_passes_strategy(s(5), s(0), SubmissionStrategy::Always)); - assert!(score_passes_strategy(s(5), s(10), SubmissionStrategy::Always)); - - // if leading - assert!(score_passes_strategy(s(0), s(0), SubmissionStrategy::IfLeading)); - assert!(score_passes_strategy(s(1), s(0), SubmissionStrategy::IfLeading)); - assert!(score_passes_strategy(s(2), s(0), SubmissionStrategy::IfLeading)); - assert!(!score_passes_strategy(s(5), s(10), SubmissionStrategy::IfLeading)); - assert!(!score_passes_strategy(s(9), s(10), SubmissionStrategy::IfLeading)); - assert!(score_passes_strategy(s(10), s(10), SubmissionStrategy::IfLeading)); - - // if better by 2% - assert!(!score_passes_strategy(s(50), s(100), SubmissionStrategy::ClaimBetterThan(two))); - assert!(!score_passes_strategy(s(100), s(100), SubmissionStrategy::ClaimBetterThan(two))); - assert!(!score_passes_strategy(s(101), s(100), SubmissionStrategy::ClaimBetterThan(two))); - assert!(!score_passes_strategy(s(102), s(100), SubmissionStrategy::ClaimBetterThan(two))); - assert!(score_passes_strategy(s(103), s(100), SubmissionStrategy::ClaimBetterThan(two))); - assert!(score_passes_strategy(s(150), s(100), SubmissionStrategy::ClaimBetterThan(two))); - - // if no less than 2% worse - assert!(!score_passes_strategy(s(50), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(!score_passes_strategy(s(97), s(100), 
SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(score_passes_strategy(s(98), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(score_passes_strategy(s(99), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(score_passes_strategy(s(100), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(score_passes_strategy(s(101), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(score_passes_strategy(s(102), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(score_passes_strategy(s(103), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - assert!(score_passes_strategy(s(150), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); - } -} diff --git a/polkadot/utils/staking-miner/src/opts.rs b/polkadot/utils/staking-miner/src/opts.rs deleted file mode 100644 index 4cf4d0a765199..0000000000000 --- a/polkadot/utils/staking-miner/src/opts.rs +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -use crate::prelude::*; -use clap::Parser; -use sp_runtime::Perbill; -use std::str::FromStr; - -#[derive(Debug, Clone, Parser)] -#[cfg_attr(test, derive(PartialEq))] -#[command(author, version, about)] -pub(crate) struct Opt { - /// The `ws` node to connect to. - #[arg(long, short, default_value = DEFAULT_URI, env = "URI", global = true)] - pub uri: String, - - /// WS connection timeout in number of seconds. - #[arg(long, default_value_t = 60)] - pub connection_timeout: usize, - - /// WS request timeout in number of seconds. - #[arg(long, default_value_t = 60 * 10)] - pub request_timeout: usize, - - #[command(subcommand)] - pub command: Command, -} - -#[derive(Debug, Clone, Parser)] -#[cfg_attr(test, derive(PartialEq))] -pub(crate) enum Command { - /// Monitor for the phase being signed, then compute. - Monitor(MonitorConfig), - - /// Just compute a solution now, and don't submit it. - DryRun(DryRunConfig), - - /// Provide a solution that can be submitted to the chain as an emergency response. - EmergencySolution(EmergencySolutionConfig), - - /// Return information about the current version. - Info(InfoOpts), -} - -#[derive(Debug, Clone, Parser)] -#[cfg_attr(test, derive(PartialEq))] -pub(crate) struct MonitorConfig { - /// The path to a file containing the seed of the account. If the file is not found, the seed - /// is used as-is. - /// - /// Can also be provided via the `SEED` environment variable. - /// - /// WARNING: Don't use an account with a large stash for this. Based on how the bot is - /// configured, it might re-try and lose funds through transaction fees/deposits. - #[arg(long, short, env = "SEED")] - pub seed_or_path: String, - - /// The type of event to listen to. - /// - /// Typically, finalized is safer and there is no chance of anything going wrong, but it can be -/// slower. 
It is recommended to use finalized, if the duration of the signed phase is longer -/// than the finality delay. - #[arg(long, default_value = "head", value_parser = ["head", "finalized"])] - pub listen: String, - - /// The solver algorithm to use. - #[command(subcommand)] - pub solver: Solver, - - /// Submission strategy to use. - /// - /// Possible options: - /// - /// `--submission-strategy if-leading`: only submit if leading. - /// - /// `--submission-strategy always`: always submit. - /// - /// `--submission-strategy "percent-better percent"`: submit if the submission is `n` percent - /// better. - /// - /// `--submission-strategy "no-worse-than percent"`: submit if submission is no more than - /// `n` percent worse. - #[clap(long, default_value = "if-leading")] - pub submission_strategy: SubmissionStrategy, - - /// Delay in number of seconds to wait until starting mining a solution. - /// - /// At every block when a solution is attempted, - /// a delay can be enforced to avoid submitting at the - /// "same time" and risking potential races with other miners. - /// - /// When this is enabled and there are competing solutions, your solution might not be - /// submitted if the scores are equal. - #[arg(long, default_value_t = 0)] - pub delay: usize, -} - -#[derive(Debug, Clone, Parser)] -#[cfg_attr(test, derive(PartialEq))] -pub(crate) struct DryRunConfig { - /// The path to a file containing the seed of the account. If the file is not found, the seed - /// is used as-is. - /// - /// Can also be provided via the `SEED` environment variable. - /// - /// WARNING: Don't use an account with a large stash for this. Based on how the bot is - /// configured, it might re-try and lose funds through transaction fees/deposits. - #[arg(long, short, env = "SEED")] - pub seed_or_path: String, - - /// The block hash at which scraping happens. If none is provided, the latest head is used. - #[arg(long)] - pub at: Option<Hash>, - - /// The solver algorithm to use. - #[command(subcommand)] - pub solver: Solver, - - /// Force create a new snapshot, else expect one to exist onchain. - #[arg(long)] - pub force_snapshot: bool, -} - -#[derive(Debug, Clone, Parser)] -#[cfg_attr(test, derive(PartialEq))] -pub(crate) struct EmergencySolutionConfig { - /// The block hash at which scraping happens. If none is provided, the latest head is used. - #[arg(long)] - pub at: Option<Hash>, - - /// The solver algorithm to use. - #[command(subcommand)] - pub solver: Solver, - - /// The number of top backed winners to take. All are taken, if not provided. - pub take: Option<usize>, -} - -#[derive(Debug, Clone, Parser)] -#[cfg_attr(test, derive(PartialEq))] -pub(crate) struct InfoOpts { - /// Serialize the output as json. - #[arg(long, short)] - pub json: bool, -} - -/// Submission strategy to use. -#[derive(Debug, Copy, Clone)] -#[cfg_attr(test, derive(PartialEq))] -pub enum SubmissionStrategy { - /// Always submit. - Always, - /// Only submit if, at the time, we are the best (or equal to it). - IfLeading, - /// Submit if we are no more than `Perbill` worse than the best. - ClaimNoWorseThan(Perbill), - /// Submit if we are leading, or if the solution that's leading is more than the given -/// `Perbill` better than us. This helps detect obviously fake solutions and still combat them. 
- ClaimBetterThan(Perbill), -} - -#[derive(Debug, Clone, Parser)] -#[cfg_attr(test, derive(PartialEq))] -pub(crate) enum Solver { - SeqPhragmen { - #[arg(long, default_value_t = 10)] - iterations: usize, - }, - PhragMMS { - #[arg(long, default_value_t = 10)] - iterations: usize, - }, -} - -/// Custom `impl` to parse `SubmissionStrategy` from CLI. -/// -/// Possible options: -/// * --submission-strategy if-leading: only submit if leading -/// * --submission-strategy always: always submit -/// * --submission-strategy "percent-better percent": submit if submission is `n` percent better. -/// * --submission-strategy "no-worse-than percent": submit if submission is no more than `n` -/// percent worse. -impl FromStr for SubmissionStrategy { - type Err = String; - - fn from_str(s: &str) -> Result { - let s = s.trim(); - - let res = if s == "if-leading" { - Self::IfLeading - } else if s == "always" { - Self::Always - } else if let Some(percent) = s.strip_prefix("no-worse-than ") { - let percent: u32 = percent.parse().map_err(|e| format!("{:?}", e))?; - Self::ClaimNoWorseThan(Perbill::from_percent(percent)) - } else if let Some(percent) = s.strip_prefix("percent-better ") { - let percent: u32 = percent.parse().map_err(|e| format!("{:?}", e))?; - Self::ClaimBetterThan(Perbill::from_percent(percent)) - } else { - return Err(s.into()) - }; - Ok(res) - } -} - -#[cfg(test)] -mod test_super { - use super::*; - - #[test] - fn cli_monitor_works() { - let opt = Opt::try_parse_from([ - env!("CARGO_PKG_NAME"), - "--uri", - "hi", - "monitor", - "--seed-or-path", - "//Alice", - "--listen", - "head", - "--delay", - "12", - "seq-phragmen", - ]) - .unwrap(); - - assert_eq!( - opt, - Opt { - uri: "hi".to_string(), - connection_timeout: 60, - request_timeout: 10 * 60, - command: Command::Monitor(MonitorConfig { - seed_or_path: "//Alice".to_string(), - listen: "head".to_string(), - solver: Solver::SeqPhragmen { iterations: 10 }, - submission_strategy: SubmissionStrategy::IfLeading, - delay: 12, - }), - } - ); - } - - #[test] - fn cli_dry_run_works() { - let opt = Opt::try_parse_from([ - env!("CARGO_PKG_NAME"), - "--uri", - "hi", - "dry-run", - "--seed-or-path", - "//Alice", - "phrag-mms", - ]) - .unwrap(); - - assert_eq!( - opt, - Opt { - uri: "hi".to_string(), - connection_timeout: 60, - request_timeout: 10 * 60, - command: Command::DryRun(DryRunConfig { - seed_or_path: "//Alice".to_string(), - at: None, - solver: Solver::PhragMMS { iterations: 10 }, - force_snapshot: false, - }), - } - ); - } - - #[test] - fn cli_emergency_works() { - let opt = Opt::try_parse_from([ - env!("CARGO_PKG_NAME"), - "--uri", - "hi", - "emergency-solution", - "99", - "phrag-mms", - "--iterations", - "1337", - ]) - .unwrap(); - - assert_eq!( - opt, - Opt { - uri: "hi".to_string(), - connection_timeout: 60, - request_timeout: 10 * 60, - command: Command::EmergencySolution(EmergencySolutionConfig { - take: Some(99), - at: None, - solver: Solver::PhragMMS { iterations: 1337 } - }), - } - ); - } - - #[test] - fn cli_info_works() { - let opt = Opt::try_parse_from([env!("CARGO_PKG_NAME"), "--uri", "hi", "info"]).unwrap(); - - assert_eq!( - opt, - Opt { - uri: "hi".to_string(), - connection_timeout: 60, - request_timeout: 10 * 60, - command: Command::Info(InfoOpts { json: false }) - } - ); - } - - #[test] - fn cli_request_conn_timeout_works() { - let opt = Opt::try_parse_from([ - env!("CARGO_PKG_NAME"), - "--uri", - "hi", - "--request-timeout", - "10", - "--connection-timeout", - "9", - "info", - ]) - .unwrap(); - - assert_eq!( - opt, - Opt { - 
uri: "hi".to_string(), - connection_timeout: 9, - request_timeout: 10, - command: Command::Info(InfoOpts { json: false }) - } - ); - } - - #[test] - fn submission_strategy_from_str_works() { - use std::str::FromStr; - - assert_eq!(SubmissionStrategy::from_str("if-leading"), Ok(SubmissionStrategy::IfLeading)); - assert_eq!(SubmissionStrategy::from_str("always"), Ok(SubmissionStrategy::Always)); - assert_eq!( - SubmissionStrategy::from_str(" percent-better 99 "), - Ok(SubmissionStrategy::ClaimBetterThan(Perbill::from_percent(99))) - ); - } -} diff --git a/polkadot/utils/staking-miner/src/prelude.rs b/polkadot/utils/staking-miner/src/prelude.rs deleted file mode 100644 index fb701ece2384d..0000000000000 --- a/polkadot/utils/staking-miner/src/prelude.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Types that we don't fetch from a particular runtime and just assume that they are constant all -//! of the place. -//! -//! It is actually easy to convert the rest as well, but it'll be a lot of noise in our codebase, -//! needing to sprinkle `any_runtime` in a few extra places. - -/// The account id type. -pub type AccountId = core_primitives::AccountId; -/// The block number type. -pub type BlockNumber = core_primitives::BlockNumber; -/// The balance type. -pub type Balance = core_primitives::Balance; -/// Index of a transaction in the chain. -pub type Nonce = core_primitives::Nonce; -/// The hash type. We re-export it here, but we can easily get it from block as well. -pub type Hash = core_primitives::Hash; -/// The header type. We re-export it here, but we can easily get it from block as well. -pub type Header = core_primitives::Header; -/// The block type. -pub type Block = core_primitives::Block; - -pub use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; - -/// Default URI to connect to. -pub const DEFAULT_URI: &str = "wss://rpc.polkadot.io:443"; -/// The logging target. -pub const LOG_TARGET: &str = "staking-miner"; - -/// The election provider pallet. -pub use pallet_election_provider_multi_phase as EPM; - -/// The externalities type. -pub type Ext = sp_state_machine::TestExternalities>; - -/// The key pair type being used. We "strongly" assume sr25519 for simplicity. -pub type Pair = sp_core::sr25519::Pair; - -/// A dynamic token type used to represent account balances. -pub type Token = sub_tokens::dynamic::DynamicToken; diff --git a/polkadot/utils/staking-miner/src/rpc.rs b/polkadot/utils/staking-miner/src/rpc.rs deleted file mode 100644 index 2d25616e2a179..0000000000000 --- a/polkadot/utils/staking-miner/src/rpc.rs +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! JSON-RPC related types and helpers. - -use super::*; -use jsonrpsee::{ - core::{Error as RpcError, RpcResult}, - proc_macros::rpc, -}; -use pallet_transaction_payment::RuntimeDispatchInfo; -use sc_transaction_pool_api::TransactionStatus; -use sp_core::{storage::StorageKey, Bytes}; -use sp_version::RuntimeVersion; -use std::{future::Future, time::Duration}; - -#[derive(frame_support::DebugNoBound, thiserror::Error)] -pub(crate) enum RpcHelperError { - JsonRpsee(#[from] jsonrpsee::core::Error), - Codec(#[from] codec::Error), -} - -impl std::fmt::Display for RpcHelperError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - ::fmt(self, f) - } -} - -#[rpc(client)] -pub trait RpcApi { - /// Fetch system name. - #[method(name = "system_chain")] - async fn system_chain(&self) -> RpcResult; - - /// Fetch a storage key. - #[method(name = "state_getStorage")] - async fn storage(&self, key: &StorageKey, hash: Option) -> RpcResult>; - - /// Fetch the runtime version. - #[method(name = "state_getRuntimeVersion")] - async fn runtime_version(&self, at: Option) -> RpcResult; - - /// Fetch the payment query info. - #[method(name = "payment_queryInfo")] - async fn payment_query_info( - &self, - encoded_xt: &Bytes, - at: Option<&Hash>, - ) -> RpcResult>; - - /// Dry run an extrinsic at a given block. Return SCALE encoded - /// [`sp_runtime::ApplyExtrinsicResult`]. - #[method(name = "system_dryRun")] - async fn dry_run(&self, extrinsic: &Bytes, at: Option) -> RpcResult; - - /// Get hash of the n-th block in the canon chain. - /// - /// By default returns latest block hash. - #[method(name = "chain_getBlockHash", aliases = ["chain_getHead"], blocking)] - fn block_hash(&self, hash: Option) -> RpcResult>; - - /// Get hash of the last finalized block in the canon chain. - #[method(name = "chain_getFinalizedHead", aliases = ["chain_getFinalisedHead"], blocking)] - fn finalized_head(&self) -> RpcResult; - - /// Submit an extrinsic to watch. - /// - /// See [`TransactionStatus`](sc_transaction_pool_api::TransactionStatus) for details on - /// transaction life cycle. - #[subscription( - name = "author_submitAndWatchExtrinsic" => "author_extrinsicUpdate", - unsubscribe = "author_unwatchExtrinsic", - item = TransactionStatus - )] - fn watch_extrinsic(&self, bytes: &Bytes); - - /// New head subscription. - #[subscription( - name = "chain_subscribeNewHeads" => "newHead", - unsubscribe = "chain_unsubscribeNewHeads", - item = Header - )] - fn subscribe_new_heads(&self); - - /// Finalized head subscription. - #[subscription( - name = "chain_subscribeFinalizedHeads" => "chain_finalizedHead", - unsubscribe = "chain_unsubscribeFinalizedHeads", - item = Header - )] - fn subscribe_finalized_heads(&self); -} - -type Uri = String; - -/// Wraps a shared web-socket JSON-RPC client that can be cloned. 
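A hedged usage sketch: since the wrapper defined below derefs to the underlying `WsClient`, the `#[rpc(client)]` methods of `RpcApi` are called on it directly, exactly as `main` does elsewhere in this patch:

```rust
// Both calls are generated from the `RpcApi` trait above.
let chain: String = rpc.system_chain().await?;
let on_chain_version = rpc.runtime_version(None).await?;
```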
-#[derive(Clone, Debug)] -pub(crate) struct SharedRpcClient(Arc, Uri); - -impl Deref for SharedRpcClient { - type Target = WsClient; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl SharedRpcClient { - /// Get the URI of the client. - pub fn uri(&self) -> &str { - &self.1 - } - - /// Create a new shared JSON-RPC web-socket client. - pub(crate) async fn new( - uri: &str, - connection_timeout: Duration, - request_timeout: Duration, - ) -> Result { - let client = WsClientBuilder::default() - .connection_timeout(connection_timeout) - .max_request_body_size(u32::MAX) - .request_timeout(request_timeout) - .max_concurrent_requests(u32::MAX as usize) - .build(uri) - .await?; - Ok(Self(Arc::new(client), uri.to_owned())) - } - - /// Get a storage item and decode it as `T`. - /// - /// # Return value: - /// - /// The function returns: - /// - /// * `Ok(Some(val))` if successful. - /// * `Ok(None)` if the storage item was not found. - /// * `Err(e)` if the JSON-RPC call failed. - pub(crate) async fn get_storage_and_decode<'a, T: codec::Decode>( - &self, - key: &StorageKey, - hash: Option, - ) -> Result, RpcHelperError> { - if let Some(bytes) = self.storage(key, hash).await? { - let decoded = ::decode(&mut &*bytes.0) - .map_err::(Into::into)?; - Ok(Some(decoded)) - } else { - Ok(None) - } - } -} - -/// Takes a future that returns `Bytes` and tries to decode those bytes into the type `Dec`. -/// Warning: don't use for storage, it will fail for non-existent storage items. -/// -/// # Return value: -/// -/// The function returns: -/// -/// * `Ok(val)` if successful. -/// * `Err(RpcHelperError::JsonRpsee)` if the JSON-RPC call failed. -/// * `Err(RpcHelperError::Codec)` if `Bytes` could not be decoded. -pub(crate) async fn await_request_and_decode<'a, Dec: codec::Decode>( - req: impl Future>, -) -> Result { - let bytes = req.await?; - Dec::decode(&mut &*bytes.0).map_err::(Into::into) -} diff --git a/polkadot/utils/staking-miner/src/runtime_versions.rs b/polkadot/utils/staking-miner/src/runtime_versions.rs deleted file mode 100644 index 38af05ead2414..0000000000000 --- a/polkadot/utils/staking-miner/src/runtime_versions.rs +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . 
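Before leaving `rpc.rs`: a hedged sketch of its `get_storage_and_decode` helper, mirroring `ensure_signed_phase` in `monitor.rs` above (the `Phase` value type is an assumption based on that call site):

```rust
// Fetch the current election phase and SCALE-decode it; `None` means the
// storage item was absent, so fall back to the default phase.
let key = StorageKey(EPM::CurrentPhase::<T>::hashed_key().to_vec());
let phase = rpc
    .get_storage_and_decode::<EPM::Phase<BlockNumber>>(&key, Some(at))
    .await?
    .unwrap_or_default();
```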
- -use sp_version::RuntimeVersion; -use std::fmt; - -#[derive(Debug, serde::Serialize)] -pub(crate) struct RuntimeWrapper<'a>(pub &'a RuntimeVersion); - -impl<'a> fmt::Display for RuntimeWrapper<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let width = 16; - - writeln!( - f, - r#" impl_name : {impl_name:>width$} - spec_name : {spec_name:>width$} - spec_version : {spec_version:>width$} - transaction_version : {transaction_version:>width$} - impl_version : {impl_version:>width$} - authoringVersion : {authoring_version:>width$} - state_version : {state_version:>width$}"#, - spec_name = self.0.spec_name.to_string(), - impl_name = self.0.impl_name.to_string(), - spec_version = self.0.spec_version, - impl_version = self.0.impl_version, - authoring_version = self.0.authoring_version, - transaction_version = self.0.transaction_version, - state_version = self.0.state_version, - ) - } -} - -impl<'a> From<&'a RuntimeVersion> for RuntimeWrapper<'a> { - fn from(r: &'a RuntimeVersion) -> Self { - RuntimeWrapper(r) - } -} - -#[derive(Debug, serde::Serialize)] -pub(crate) struct RuntimeVersions<'a> { - /// The `RuntimeVersion` linked in the staking-miner - pub linked: RuntimeWrapper<'a>, - - /// The `RuntimeVersion` reported by the node we connect to via RPC - pub remote: RuntimeWrapper<'a>, - - /// This `bool` reports whether both remote and linked `RuntimeVersion` are compatible - /// and if the staking-miner is expected to work properly against the remote runtime - compatible: bool, -} - -impl<'a> RuntimeVersions<'a> { - pub fn new( - remote_runtime_version: &'a RuntimeVersion, - linked_runtime_version: &'a RuntimeVersion, - ) -> Self { - Self { - remote: remote_runtime_version.into(), - linked: linked_runtime_version.into(), - compatible: are_runtimes_compatible(remote_runtime_version, linked_runtime_version), - } - } -} - -/// Check whether runtimes are compatible. Currently we only support equality. -fn are_runtimes_compatible(r1: &RuntimeVersion, r2: &RuntimeVersion) -> bool { - r1 == r2 -} - -impl<'a> fmt::Display for RuntimeVersions<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let _ = write!(f, "- linked:\n{}", self.linked); - let _ = write!(f, "- remote :\n{}", self.remote); - write!(f, "Compatible: {}", if self.compatible { "YES" } else { "NO" }) - } -} diff --git a/polkadot/utils/staking-miner/src/signer.rs b/polkadot/utils/staking-miner/src/signer.rs deleted file mode 100644 index e6677ccd3a661..0000000000000 --- a/polkadot/utils/staking-miner/src/signer.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Wrappers around creating a signer account. 
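A hedged sketch of how these wrappers are driven from `main` (see the `Monitor`/`DryRun` arms above): the CLI value is resolved into a checked signer, failing hard if the account does not exist on-chain:

```rust
let signer = signer::signer_uri_from_string::<Runtime>(&config.seed_or_path, &rpc)
    .await
    .expect("Provided account is invalid, terminating.");
```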
- -use crate::{prelude::*, rpc::SharedRpcClient, AccountId, Error, Nonce, Pair, LOG_TARGET}; -use frame_system::AccountInfo; -use sp_core::{crypto::Pair as _, storage::StorageKey}; - -pub(crate) const SIGNER_ACCOUNT_WILL_EXIST: &str = - "signer account is checked to exist upon startup; it can only die if it transfers funds out \ - of it, or get slashed. If it does not exist at this point, it is likely due to a bug, or the \ - signer got slashed. Terminating."; - -/// Some information about the signer. Redundant at this point, but makes life easier. -#[derive(Clone)] -pub(crate) struct Signer { - /// The account id. - pub(crate) account: AccountId, - - /// The full crypto key-pair. - pub(crate) pair: Pair, -} - -pub(crate) async fn get_account_info + EPM::Config>( - rpc: &SharedRpcClient, - who: &T::AccountId, - maybe_at: Option, -) -> Result>, Error> { - rpc.get_storage_and_decode::>( - &StorageKey(>::hashed_key_for(&who)), - maybe_at, - ) - .await - .map_err(Into::into) -} - -/// Read the signer account's URI -pub(crate) async fn signer_uri_from_string< - T: frame_system::Config< - AccountId = AccountId, - Nonce = Nonce, - AccountData = pallet_balances::AccountData, - Hash = Hash, - > + EPM::Config, ->( - mut seed_or_path: &str, - client: &SharedRpcClient, -) -> Result> { - seed_or_path = seed_or_path.trim(); - - let seed = match std::fs::read(seed_or_path) { - Ok(s) => String::from_utf8(s).map_err(|_| Error::::AccountDoesNotExists)?, - Err(_) => seed_or_path.to_string(), - }; - let seed = seed.trim(); - - let pair = Pair::from_string(seed, None)?; - let account = T::AccountId::from(pair.public()); - let _info = get_account_info::(client, &account, None) - .await? - .ok_or(Error::::AccountDoesNotExists)?; - log::info!( - target: LOG_TARGET, - "loaded account {:?}, free: {:?}, info: {:?}", - &account, - Token::from(_info.data.free), - _info - ); - Ok(Signer { account, pair }) -} diff --git a/polkadot/utils/staking-miner/tests/cli.rs b/polkadot/utils/staking-miner/tests/cli.rs deleted file mode 100644 index 1ced1239e5530..0000000000000 --- a/polkadot/utils/staking-miner/tests/cli.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . 
- -use assert_cmd::{cargo::cargo_bin, Command}; -use serde_json::{Result, Value}; - -#[test] -fn cli_version_works() { - let crate_name = env!("CARGO_PKG_NAME"); - let output = Command::new(cargo_bin(crate_name)).arg("--version").output().unwrap(); - - assert!(output.status.success(), "command returned with non-success exit code"); - let version = String::from_utf8_lossy(&output.stdout).trim().to_owned(); - - assert_eq!(version, format!("{} {}", crate_name, env!("CARGO_PKG_VERSION"))); -} - -#[test] -fn cli_info_works() { - let crate_name = env!("CARGO_PKG_NAME"); - let output = Command::new(cargo_bin(crate_name)) - .arg("info") - .arg("--json") - .env("RUST_LOG", "none") - .output() - .unwrap(); - - assert!(output.status.success(), "command returned with non-success exit code"); - let info = String::from_utf8_lossy(&output.stdout).trim().to_owned(); - let v: Result = serde_json::from_str(&info); - let v = v.unwrap(); - assert!(!v["builtin"].to_string().is_empty()); - assert!(!v["builtin"]["spec_name"].to_string().is_empty()); - assert!(!v["builtin"]["spec_version"].to_string().is_empty()); - assert!(!v["remote"].to_string().is_empty()); -} diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index f26a6f40d4267..0d751e3f9cb08 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -149,7 +149,8 @@ //! while this binary lives in the Polkadot repository, this particular subcommand of it can work //! against any substrate-based chain. //! -//! See the `staking-miner` documentation in the Polkadot repository for more information. +//! See the [`staking-miner`](https://github.com/paritytech/staking-miner-v2) docs for more +//! information. //! //! ## Feasible Solution (correct solution) //! From 2c8021f998a4b3f3e9c0416c7886ef05803aba9d Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Mon, 11 Sep 2023 19:14:07 +0200 Subject: [PATCH 03/16] Retire puppet workers (#1449) Closes #583 After the separation of PVF worker binaries, dedicated puppet workers are not needed for tests anymore. The production workers can be used instead, avoiding some code duplication and decreasing complexity. The changes also make it possible to further refactor the code to isolate workers completely. 
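The replacement wiring is deliberately small. Instead of building a dedicated puppet binary, the test service now derives the production workers' location from the test executable itself; a sketch of the new lookup (see the `cumulus/test/service/src/lib.rs` hunk below):

```rust
// The workers are expected to sit two directories above the test binary,
// e.g. `target/debug/deps/<test>` resolves to `target/debug`.
let mut workers_path = std::env::current_exe().unwrap();
workers_path.pop(); // drop the binary name
workers_path.pop(); // drop `deps/`
polkadot_test_service::run_validator_node(config, Some(workers_path));
```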
--- Cargo.lock | 12 -- Cargo.toml | 1 - .../Cargo.toml | 15 -- .../relay-validation-worker-provider/build.rs | 169 ------------------ .../src/lib.rs | 27 --- cumulus/test/service/Cargo.toml | 1 - cumulus/test/service/src/lib.rs | 9 +- polkadot/node/core/pvf/Cargo.toml | 8 - .../node/core/pvf/common/src/worker/mod.rs | 4 + polkadot/node/core/pvf/src/lib.rs | 10 -- polkadot/node/core/pvf/src/testing.rs | 42 ----- polkadot/node/core/pvf/tests/README.md | 9 + polkadot/node/core/pvf/tests/it/main.rs | 17 +- .../node/core/pvf/tests/it/worker_common.rs | 35 ++-- .../test-parachains/adder/collator/Cargo.toml | 15 +- .../adder/collator/bin/puppet_worker.rs | 17 -- .../adder/collator/tests/integration.rs | 10 +- .../undying/collator/Cargo.toml | 19 +- .../undying/collator/bin/puppet_worker.rs | 17 -- .../undying/collator/tests/integration.rs | 10 +- 20 files changed, 71 insertions(+), 376 deletions(-) delete mode 100644 cumulus/test/relay-validation-worker-provider/Cargo.toml delete mode 100644 cumulus/test/relay-validation-worker-provider/build.rs delete mode 100644 cumulus/test/relay-validation-worker-provider/src/lib.rs create mode 100644 polkadot/node/core/pvf/tests/README.md delete mode 100644 polkadot/parachain/test-parachains/adder/collator/bin/puppet_worker.rs delete mode 100644 polkadot/parachain/test-parachains/undying/collator/bin/puppet_worker.rs diff --git a/Cargo.lock b/Cargo.lock index 3d4bc17563ba7..0623d96ef1cbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3899,14 +3899,6 @@ dependencies = [ "sp-trie", ] -[[package]] -name = "cumulus-test-relay-validation-worker-provider" -version = "0.1.0" -dependencies = [ - "polkadot-node-core-pvf", - "toml 0.7.6", -] - [[package]] name = "cumulus-test-runtime" version = "0.1.0" @@ -3958,7 +3950,6 @@ dependencies = [ "cumulus-relay-chain-minimal-node", "cumulus-test-client", "cumulus-test-relay-sproof-builder", - "cumulus-test-relay-validation-worker-provider", "cumulus-test-runtime", "frame-system", "frame-system-rpc-runtime-api", @@ -12018,7 +12009,6 @@ dependencies = [ "slotmap", "sp-core", "sp-maybe-compressed-blob", - "sp-tracing", "sp-wasm-interface", "substrate-build-script-utils", "tempfile", @@ -18462,7 +18452,6 @@ dependencies = [ "sp-keyring", "substrate-test-utils", "test-parachain-adder", - "test-parachain-adder-collator", "tokio", ] @@ -18511,7 +18500,6 @@ dependencies = [ "sp-keyring", "substrate-test-utils", "test-parachain-undying", - "test-parachain-undying-collator", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 89fb007058aa3..d1078e3c86a82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -92,7 +92,6 @@ members = [ "cumulus/primitives/utility", "cumulus/test/client", "cumulus/test/relay-sproof-builder", - "cumulus/test/relay-validation-worker-provider", "cumulus/test/runtime", "cumulus/test/service", "cumulus/xcm/xcm-emulator", diff --git a/cumulus/test/relay-validation-worker-provider/Cargo.toml b/cumulus/test/relay-validation-worker-provider/Cargo.toml deleted file mode 100644 index b7c59e8329958..0000000000000 --- a/cumulus/test/relay-validation-worker-provider/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "cumulus-test-relay-validation-worker-provider" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -build = "build.rs" -publish = false - -[dependencies] - -# Polkadot -polkadot-node-core-pvf = { path = "../../../polkadot/node/core/pvf", features = ["test-utils"] } - -[build-dependencies] -toml = "0.7.6" diff --git a/cumulus/test/relay-validation-worker-provider/build.rs 
b/cumulus/test/relay-validation-worker-provider/build.rs deleted file mode 100644 index 60bb950db1fcc..0000000000000 --- a/cumulus/test/relay-validation-worker-provider/build.rs +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -use std::{ - env, fs, - path::{Path, PathBuf}, - process::{self, Command}, -}; -use toml::value::Table; - -/// The name of the project we will building. -const PROJECT_NAME: &str = "validation-worker"; -/// The env variable that instructs us to skip the build. -const SKIP_ENV: &str = "SKIP_BUILD"; - -fn main() { - if env::var(SKIP_ENV).is_ok() { - return - } - - let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo")); - - let project = create_project(&out_dir); - build_project(&project.join("Cargo.toml")); - - fs::copy(project.join("target/release").join(PROJECT_NAME), out_dir.join(PROJECT_NAME)) - .expect("Copies validation worker"); -} - -fn find_cargo_lock() -> PathBuf { - let mut path = PathBuf::from( - env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` is set by cargo"), - ); - - loop { - if path.join("Cargo.lock").exists() { - return path.join("Cargo.lock") - } - - if !path.pop() { - panic!("Could not find `Cargo.lock`") - } - } -} - -fn create_project(out_dir: &Path) -> PathBuf { - let project_dir = out_dir.join(format!("{}-project", PROJECT_NAME)); - fs::create_dir_all(project_dir.join("src")).expect("Creates project dir and project src dir"); - - let mut project_toml = Table::new(); - - let mut package = Table::new(); - package.insert("name".into(), PROJECT_NAME.into()); - package.insert("version".into(), "1.0.0".into()); - package.insert("edition".into(), "2021".into()); - - project_toml.insert("package".into(), package.into()); - - project_toml.insert("workspace".into(), Table::new().into()); - - let mut dependencies = Table::new(); - - let mut dependency_project = Table::new(); - dependency_project.insert( - "path".into(), - env::var("CARGO_MANIFEST_DIR") - .expect("`CARGO_MANIFEST_DIR` is set by cargo") - .into(), - ); - - dependencies - .insert("cumulus-test-relay-validation-worker-provider".into(), dependency_project.into()); - - project_toml.insert("dependencies".into(), dependencies.into()); - - add_patches(&mut project_toml); - - fs::write( - project_dir.join("Cargo.toml"), - toml::to_string_pretty(&project_toml).expect("Wasm workspace toml is valid; qed"), - ) - .expect("Writes project `Cargo.toml`"); - - fs::write( - project_dir.join("src").join("main.rs"), - r#" - cumulus_test_relay_validation_worker_provider::polkadot_node_core_pvf::decl_puppet_worker_main!(); - "#, - ) - .expect("Writes `main.rs`"); - - let cargo_lock = find_cargo_lock(); - fs::copy(&cargo_lock, project_dir.join("Cargo.lock")).expect("Copies `Cargo.lock`"); - println!("cargo:rerun-if-changed={}", cargo_lock.display()); - - project_dir -} - -fn add_patches(project_toml: &mut 
Table) { - let workspace_toml_path = PathBuf::from( - env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` is set by cargo"), - ) - .join("../../../Cargo.toml"); - - let mut workspace_toml: Table = toml::from_str( - &fs::read_to_string(&workspace_toml_path).expect("Workspace root `Cargo.toml` exists; qed"), - ) - .expect("Workspace root `Cargo.toml` is a valid toml file; qed"); - - let mut workspace_path = workspace_toml_path; - workspace_path.pop(); - - while let Some(mut patch) = - workspace_toml.remove("patch").and_then(|p| p.try_into::().ok()) - { - // Iterate over all patches and make the patch path absolute from the workspace root path. - patch - .iter_mut() - .filter_map(|p| { - p.1.as_table_mut().map(|t| t.iter_mut().filter_map(|t| t.1.as_table_mut())) - }) - .flatten() - .for_each(|p| { - p.iter_mut().filter(|(k, _)| k == &"path").for_each(|(_, v)| { - if let Some(path) = v.as_str().map(PathBuf::from) { - if path.is_relative() { - *v = workspace_path.join(path).display().to_string().into(); - } - } - }) - }); - - project_toml.insert("patch".into(), patch.into()); - } -} - -fn build_project(cargo_toml: &Path) { - let cargo = env::var("CARGO").expect("`CARGO` env variable is always set by cargo"); - - let status = Command::new(cargo) - .arg("build") - .arg("--release") - .arg(format!("--manifest-path={}", cargo_toml.display())) - // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir - // exclusive). - .env_remove("CARGO_TARGET_DIR") - // Do not call us recursively. - .env(SKIP_ENV, "1") - .status(); - - match status.map(|s| s.success()) { - Ok(true) => {}, - // Use `process.exit(1)` to have a clean error output. - _ => process::exit(1), - } -} diff --git a/cumulus/test/relay-validation-worker-provider/src/lib.rs b/cumulus/test/relay-validation-worker-provider/src/lib.rs deleted file mode 100644 index 6c3f4182b03bd..0000000000000 --- a/cumulus/test/relay-validation-worker-provider/src/lib.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Provides the [`VALIDATION_WORKER`] for integration tests in Cumulus. -//! -//! The validation worker is used by the relay chain to validate parachains. This worker is placed -//! in an extra process to provide better security and to ensure that a worker can be killed etc. -//! -//! !!This should only be used for tests!! - -pub use polkadot_node_core_pvf; - -/// The path to the validation worker. 
-pub const VALIDATION_WORKER: &str = concat!(env!("OUT_DIR"), "/validation-worker"); diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 04d53545ead71..c65eb0dd024fe 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -72,7 +72,6 @@ cumulus-primitives-core = { path = "../../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } cumulus-relay-chain-inprocess-interface = { path = "../../client/relay-chain-inprocess-interface" } cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface" } -cumulus-test-relay-validation-worker-provider = { path = "../relay-validation-worker-provider" } cumulus-test-runtime = { path = "../runtime" } cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 3275aabc4d860..a721645546af7 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -903,8 +903,9 @@ pub fn run_relay_chain_validator_node( config.rpc_addr = Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port)); } - polkadot_test_service::run_validator_node( - config, - Some(cumulus_test_relay_validation_worker_provider::VALIDATION_WORKER.into()), - ) + let mut workers_path = std::env::current_exe().unwrap(); + workers_path.pop(); + workers_path.pop(); + + polkadot_test_service::run_validator_node(config, Some(workers_path)) } diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index ad9120295d229..478d1952d9d91 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -6,11 +6,6 @@ authors.workspace = true edition.workspace = true license.workspace = true -[[bin]] -name = "puppet_worker" -path = "bin/puppet_worker.rs" -required-features = ["test-utils"] - [dependencies] always-assert = "0.1" futures = "0.3.21" @@ -35,7 +30,6 @@ polkadot-primitives = { path = "../../../primitives" } sp-core = { path = "../../../../substrate/primitives/core" } sp-wasm-interface = { path = "../../../../substrate/primitives/wasm-interface" } sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob" } -sp-tracing = { path = "../../../../substrate/primitives/tracing", optional = true } polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } @@ -56,9 +50,7 @@ halt = { package = "test-parachain-halt", path = "../../../parachain/test-parach ci-only-tests = [] jemalloc-allocator = [ "polkadot-node-core-pvf-common/jemalloc-allocator" ] # This feature is used to export test code to other crates without putting it in the production build. -# This is also used by the `puppet_worker` binary. test-utils = [ "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", - "sp-tracing", ] diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index 40e540bb3f7eb..a3f8e777c48b8 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -60,6 +60,10 @@ macro_rules! 
decl_worker_main { println!("{}", $worker_version); return }, + "test-sleep" => { + std::thread::sleep(std::time::Duration::from_secs(5)); + return + }, subcommand => { // Must be passed for compatibility with the single-binary test workers. if subcommand != $expected_command { diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index c3a7a4613139e..0e4f2444adf72 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -100,10 +100,6 @@ mod worker_intf; #[cfg(feature = "test-utils")] pub mod testing; -// Used by `decl_puppet_worker_main!`. -#[cfg(feature = "test-utils")] -pub use sp_tracing; - pub use error::{InvalidCandidate, ValidationError}; pub use host::{start, Config, ValidationHost, EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}; pub use metrics::Metrics; @@ -117,11 +113,5 @@ pub use polkadot_node_core_pvf_common::{ pvf::PvfPrepData, }; -// Re-export worker entrypoints. -#[cfg(feature = "test-utils")] -pub use polkadot_node_core_pvf_execute_worker::worker_entrypoint as execute_worker_entrypoint; -#[cfg(feature = "test-utils")] -pub use polkadot_node_core_pvf_prepare_worker::worker_entrypoint as prepare_worker_entrypoint; - /// The log target for this crate. pub const LOG_TARGET: &str = "parachain::pvf"; diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 980a28c01566c..4301afc3cc7ea 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -47,45 +47,3 @@ pub fn validate_candidate( Ok(result) } - -/// Use this macro to declare a `fn main() {}` that will check the arguments and dispatch them to -/// the appropriate worker, making the executable that can be used for spawning workers. -#[macro_export] -macro_rules! decl_puppet_worker_main { - () => { - fn main() { - $crate::sp_tracing::try_init_simple(); - - let args = std::env::args().collect::>(); - if args.len() == 1 { - panic!("wrong number of arguments"); - } - - let entrypoint = match args[1].as_ref() { - "exit" => { - std::process::exit(1); - }, - "sleep" => { - std::thread::sleep(std::time::Duration::from_secs(5)); - return - }, - "prepare-worker" => $crate::prepare_worker_entrypoint, - "execute-worker" => $crate::execute_worker_entrypoint, - other => panic!("unknown subcommand: {}", other), - }; - - let mut node_version = None; - let mut socket_path: &str = ""; - - for i in (2..args.len()).step_by(2) { - match args[i].as_ref() { - "--socket-path" => socket_path = args[i + 1].as_str(), - "--node-impl-version" => node_version = Some(args[i + 1].as_str()), - arg => panic!("Unexpected argument found: {}", arg), - } - } - - entrypoint(&socket_path, node_version, None); - } - }; -} diff --git a/polkadot/node/core/pvf/tests/README.md b/polkadot/node/core/pvf/tests/README.md new file mode 100644 index 0000000000000..27385e190250d --- /dev/null +++ b/polkadot/node/core/pvf/tests/README.md @@ -0,0 +1,9 @@ +# PVF host integration tests + +## Testing + +Before running these tests, make sure the worker binaries are built first. 
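The tests locate the `polkadot-prepare-worker` and `polkadot-execute-worker` binaries relative to the test executable, i.e. in the build's `target` directory, so both binaries must be present there.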
This can be done with: + +```sh +cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker +``` diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 12d87ee442622..dc8f00098ec5e 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -33,7 +33,6 @@ use tokio::sync::Mutex; mod adder; mod worker_common; -const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_puppet_worker"); const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(3); @@ -51,10 +50,20 @@ impl TestHost { where F: FnOnce(&mut Config), { + let mut workers_path = std::env::current_exe().unwrap(); + workers_path.pop(); + workers_path.pop(); + let mut prepare_worker_path = workers_path.clone(); + prepare_worker_path.push("polkadot-prepare-worker"); + let mut execute_worker_path = workers_path.clone(); + execute_worker_path.push("polkadot-execute-worker"); let cache_dir = tempfile::tempdir().unwrap(); - let program_path = std::path::PathBuf::from(PUPPET_EXE); - let mut config = - Config::new(cache_dir.path().to_owned(), None, program_path.clone(), program_path); + let mut config = Config::new( + cache_dir.path().to_owned(), + None, + prepare_worker_path, + execute_worker_path, + ); f(&mut config); let (host, task) = start(config, Metrics::default()); let _ = tokio::task::spawn(task); diff --git a/polkadot/node/core/pvf/tests/it/worker_common.rs b/polkadot/node/core/pvf/tests/it/worker_common.rs index a3bf552e894a5..875ae79af0973 100644 --- a/polkadot/node/core/pvf/tests/it/worker_common.rs +++ b/polkadot/node/core/pvf/tests/it/worker_common.rs @@ -14,26 +14,41 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::time::Duration; - use polkadot_node_core_pvf::testing::{spawn_with_program_path, SpawnErr}; +use std::time::Duration; -use crate::PUPPET_EXE; +fn worker_path(name: &str) -> std::path::PathBuf { + let mut worker_path = std::env::current_exe().unwrap(); + worker_path.pop(); + worker_path.pop(); + worker_path.push(name); + worker_path +} // Test spawning a program that immediately exits with a failure code. 
#[tokio::test] async fn spawn_immediate_exit() { - let result = - spawn_with_program_path("integration-test", PUPPET_EXE, &["exit"], Duration::from_secs(2)) - .await; + // There's no explicit `exit` subcommand in the worker; it will panic on an unknown + // subcommand anyway + let result = spawn_with_program_path( + "integration-test", + worker_path("polkadot-prepare-worker"), + &["exit"], + Duration::from_secs(2), + ) + .await; assert!(matches!(result, Err(SpawnErr::AcceptTimeout))); } #[tokio::test] async fn spawn_timeout() { - let result = - spawn_with_program_path("integration-test", PUPPET_EXE, &["sleep"], Duration::from_secs(2)) - .await; + let result = spawn_with_program_path( + "integration-test", + worker_path("polkadot-execute-worker"), + &["test-sleep"], + Duration::from_secs(2), + ) + .await; assert!(matches!(result, Err(SpawnErr::AcceptTimeout))); } @@ -41,7 +56,7 @@ async fn spawn_timeout() { async fn should_connect() { let _ = spawn_with_program_path( "integration-test", - PUPPET_EXE, + worker_path("polkadot-prepare-worker"), &["prepare-worker"], Duration::from_secs(2), ) diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 7079ab7327048..5c93d716528ab 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -11,11 +11,6 @@ license.workspace = true name = "adder-collator" path = "src/main.rs" -[[bin]] -name = "adder_collator_puppet_worker" -path = "bin/puppet_worker.rs" -required-features = ["test-utils"] - [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } clap = { version = "4.4.2", features = ["derive"] } @@ -33,25 +28,17 @@ polkadot-node-subsystem = { path = "../../../../node/subsystem" } sc-cli = { path = "../../../../../substrate/client/cli" } sp-core = { path = "../../../../../substrate/primitives/core" } sc-service = { path = "../../../../../substrate/client/service" } -# This one is tricky. Even though it is not used directly by the collator, we still need it for the -# `puppet_worker` binary, which is required for the integration test. However, this shouldn't be -# a big problem since it is used transitively anyway. -polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"], optional = true } [dev-dependencies] polkadot-parachain-primitives = { path = "../../.." } polkadot-test-service = { path = "../../../../node/test/service" } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"] } substrate-test-utils = { path = "../../../../../substrate/test-utils" } sc-service = { path = "../../../../../substrate/client/service" } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } -# For the puppet worker, depend on ourselves with the test-utils feature. -test-parachain-adder-collator = { path = "", features = ["test-utils"] } tokio = { version = "1.24.2", features = ["macros"] } [features] network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] -# This feature is used to export test code to other crates without putting it in the production build. -# This is also used by the `puppet_worker` binary. 
-test-utils = [ "polkadot-node-core-pvf/test-utils" ] diff --git a/polkadot/parachain/test-parachains/adder/collator/bin/puppet_worker.rs b/polkadot/parachain/test-parachains/adder/collator/bin/puppet_worker.rs deleted file mode 100644 index 7f93519d84540..0000000000000 --- a/polkadot/parachain/test-parachains/adder/collator/bin/puppet_worker.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -polkadot_node_core_pvf::decl_puppet_worker_main!(); diff --git a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs index 6b481f961a429..85abf8bf36b97 100644 --- a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs @@ -17,8 +17,6 @@ //! Integration test that ensures that we can build and include parachain //! blocks of the adder parachain. -const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_adder_collator_puppet_worker"); - // If this test is failing, make sure to run all tests with the `real-overseer` feature being // enabled. 
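// The validator nodes in this test are pointed at the directory containing the
// real `polkadot-prepare-worker` and `polkadot-execute-worker` binaries (derived
// from the test executable's path) instead of a dedicated puppet worker binary.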
@@ -41,8 +39,12 @@ async fn collating_using_adder_collator() { true, ); + let mut workers_path = std::env::current_exe().unwrap(); + workers_path.pop(); + workers_path.pop(); + // start alice - let alice = polkadot_test_service::run_validator_node(alice_config, Some(PUPPET_EXE.into())); + let alice = polkadot_test_service::run_validator_node(alice_config, Some(workers_path.clone())); let bob_config = polkadot_test_service::node_config( || {}, @@ -53,7 +55,7 @@ async fn collating_using_adder_collator() { ); // start bob - let bob = polkadot_test_service::run_validator_node(bob_config, Some(PUPPET_EXE.into())); + let bob = polkadot_test_service::run_validator_node(bob_config, Some(workers_path)); let collator = test_parachain_adder_collator::Collator::new(); diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 0f1fd60a90001..9cdacbc56575b 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -11,15 +11,10 @@ publish = false name = "undying-collator" path = "src/main.rs" -[[bin]] -name = "undying_collator_puppet_worker" -path = "bin/puppet_worker.rs" -required-features = ["test-utils"] - [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } clap = { version = "4.4.2", features = ["derive"] } -futures = "0.3.19" +futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" @@ -33,24 +28,14 @@ polkadot-node-subsystem = { path = "../../../../node/subsystem" } sc-cli = { path = "../../../../../substrate/client/cli" } sp-core = { path = "../../../../../substrate/primitives/core" } sc-service = { path = "../../../../../substrate/client/service" } -# This one is tricky. Even though it is not used directly by the collator, we still need it for the -# `puppet_worker` binary, which is required for the integration test. However, this shouldn't be -# a big problem since it is used transitively anyway. -polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"], optional = true } [dev-dependencies] polkadot-parachain-primitives = { path = "../../.." } polkadot-test-service = { path = "../../../../node/test/service" } -# For the puppet worker, depend on ourselves with the test-utils feature. -test-parachain-undying-collator = { path = "", features = ["test-utils"] } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"] } substrate-test-utils = { path = "../../../../../substrate/test-utils" } sc-service = { path = "../../../../../substrate/client/service" } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } tokio = { version = "1.24.2", features = ["macros"] } - -[features] -# This feature is used to export test code to other crates without putting it in the production build. -# This is also used by the `puppet_worker` binary. -test-utils = [ "polkadot-node-core-pvf/test-utils" ] diff --git a/polkadot/parachain/test-parachains/undying/collator/bin/puppet_worker.rs b/polkadot/parachain/test-parachains/undying/collator/bin/puppet_worker.rs deleted file mode 100644 index 7f93519d84540..0000000000000 --- a/polkadot/parachain/test-parachains/undying/collator/bin/puppet_worker.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -polkadot_node_core_pvf::decl_puppet_worker_main!(); diff --git a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs index a98a7ff6eefc4..8be535b9bb4cc 100644 --- a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs @@ -17,8 +17,6 @@ //! Integration test that ensures that we can build and include parachain //! blocks of the `Undying` parachain. -const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_undying_collator_puppet_worker"); - // If this test is failing, make sure to run all tests with the `real-overseer` feature being // enabled. #[tokio::test(flavor = "multi_thread")] @@ -40,8 +38,12 @@ async fn collating_using_undying_collator() { true, ); + let mut workers_path = std::env::current_exe().unwrap(); + workers_path.pop(); + workers_path.pop(); + // start alice - let alice = polkadot_test_service::run_validator_node(alice_config, Some(PUPPET_EXE.into())); + let alice = polkadot_test_service::run_validator_node(alice_config, Some(workers_path.clone())); let bob_config = polkadot_test_service::node_config( || {}, @@ -52,7 +54,7 @@ async fn collating_using_undying_collator() { ); // start bob - let bob = polkadot_test_service::run_validator_node(bob_config, Some(PUPPET_EXE.into())); + let bob = polkadot_test_service::run_validator_node(bob_config, Some(workers_path)); let collator = test_parachain_undying_collator::Collator::new(1_000, 1); From 44dbb73945a20fa16ed004d50827c63c0921cbd0 Mon Sep 17 00:00:00 2001 From: Vsevolod Stakhov Date: Mon, 11 Sep 2023 19:33:51 +0100 Subject: [PATCH 04/16] Allow to broadcast network messages in parallel (#1409) This PR addresses multiple issues pending: * [x] Update orchestra to the recent version and test how the node performs * [x] Add some useful metrics for outbound network bridge * [x] Try to send incoming network requests to all subsystems without blocking on some particular subsystem in that loop * [x] Fix all incompatibilities between orchestra and polkadot code (e.g. 
malus node) --- Cargo.lock | 41 +++++------ .../Cargo.toml | 2 +- polkadot/node/malus/src/interceptor.rs | 62 ++++++++++++++--- polkadot/node/malus/src/variants/common.rs | 7 -- polkadot/node/metrics/Cargo.toml | 3 +- polkadot/node/network/bridge/src/metrics.rs | 53 ++++++++++++++ polkadot/node/network/bridge/src/network.rs | 1 + polkadot/node/network/bridge/src/rx/mod.rs | 69 ++++++++++++++++--- polkadot/node/network/bridge/src/tx/mod.rs | 15 ++++ polkadot/node/overseer/Cargo.toml | 9 +-- polkadot/node/overseer/src/lib.rs | 2 +- polkadot/node/overseer/src/tests.rs | 1 + .../node/subsystem-test-helpers/src/lib.rs | 10 ++- polkadot/node/subsystem-types/Cargo.toml | 2 +- polkadot/node/subsystem-util/Cargo.toml | 2 +- 15 files changed, 223 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0623d96ef1cbe..686ee62add3aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4841,18 +4841,6 @@ dependencies = [ "quote", ] -[[package]] -name = "expander" -version = "0.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3774182a5df13c3d1690311ad32fbe913feef26baba609fa2dd5f72042bd2ab6" -dependencies = [ - "blake2", - "fs-err", - "proc-macro2", - "quote", -] - [[package]] name = "expander" version = "2.0.0" @@ -6896,6 +6884,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "layout-rs" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1164ef87cb9607c2d887216eca79f0fc92895affe1789bba805dd38d829584e0" +dependencies = [ + "log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -8692,9 +8689,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "orchestra" -version = "0.0.5" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "227585216d05ba65c7ab0a0450a3cf2cbd81a98862a54c4df8e14d5ac6adb015" +checksum = "46d78e1deb2a8d54fc1f063a544130db4da31dfe4d5d3b493186424910222a76" dependencies = [ "async-trait", "dyn-clonable", @@ -8709,12 +8706,16 @@ dependencies = [ [[package]] name = "orchestra-proc-macro" -version = "0.0.5" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2871aadd82a2c216ee68a69837a526dfe788ecbe74c4c5038a6acdbff6653066" +checksum = "d035b1f968d91a826f2e34a9d6d02cb2af5aa7ca39ebd27922d850ab4b2dd2c6" dependencies = [ - "expander 0.0.6", - "itertools 0.10.5", + "anyhow", + "expander 2.0.0", + "fs-err", + "indexmap 2.0.0", + "itertools 0.11.0", + "layout-rs", "petgraph", "proc-macro-crate", "proc-macro2", @@ -13310,9 +13311,9 @@ dependencies = [ [[package]] name = "prioritized-metered-channel" -version = "0.2.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "382698e48a268c832d0b181ed438374a6bb708a82a8ca273bb0f61c74cf209c4" +checksum = "e99f0c89bd88f393aab44a4ab949351f7bc7e7e1179d11ecbfe50cbe4c47e342" dependencies = [ "coarsetime", "crossbeam-queue", diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index c198b22cad4a8..39eda5075e29e 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -37,7 +37,7 @@ sp-keyring = { path = "../../../substrate/primitives/keyring" } # Polkadot polkadot-primitives = { path = "../../../polkadot/primitives" } polkadot-test-client = { path = "../../../polkadot/node/test/client" } -metered = { package = 
"prioritized-metered-channel", version = "0.2.0" } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } # Cumulus cumulus-test-service = { path = "../../test/service" } diff --git a/polkadot/node/malus/src/interceptor.rs b/polkadot/node/malus/src/interceptor.rs index cbf39bccd1602..04ee0905deeb0 100644 --- a/polkadot/node/malus/src/interceptor.rs +++ b/polkadot/node/malus/src/interceptor.rs @@ -47,12 +47,20 @@ where Some(msg) } - /// Modify outgoing messages. + /// Specifies if we need to replace some outgoing message with another (potentially empty) + /// message + fn need_intercept_outgoing( + &self, + _msg: &::OutgoingMessages, + ) -> bool { + false + } + /// Send modified message instead of the original one fn intercept_outgoing( &self, - msg: ::OutgoingMessages, + _msg: &::OutgoingMessages, ) -> Option<::OutgoingMessages> { - Some(msg) + None } } @@ -66,7 +74,7 @@ pub struct InterceptedSender { #[async_trait::async_trait] impl overseer::SubsystemSender for InterceptedSender where - OutgoingMessage: overseer::AssociateOutgoing + Send + 'static, + OutgoingMessage: overseer::AssociateOutgoing + Send + 'static + TryFrom, Sender: overseer::SubsystemSender + overseer::SubsystemSender< < @@ -78,17 +86,48 @@ where < >::Message as overseer::AssociateOutgoing >::OutgoingMessages: - From, + From + Send + Sync, + >::Error: std::fmt::Debug, { async fn send_message(&mut self, msg: OutgoingMessage) { let msg = < <>::Message as overseer::AssociateOutgoing >::OutgoingMessages as From>::from(msg); - if let Some(msg) = self.message_filter.intercept_outgoing(msg) { + if self.message_filter.need_intercept_outgoing(&msg) { + if let Some(msg) = self.message_filter.intercept_outgoing(&msg) { + self.inner.send_message(msg).await; + } + } + else { self.inner.send_message(msg).await; } } + fn try_send_message(&mut self, msg: OutgoingMessage) -> Result<(), TrySendError> { + let msg = < + <>::Message as overseer::AssociateOutgoing + >::OutgoingMessages as From>::from(msg); + if self.message_filter.need_intercept_outgoing(&msg) { + if let Some(real_msg) = self.message_filter.intercept_outgoing(&msg) { + let orig_msg : OutgoingMessage = msg.into().try_into().expect("must be able to recover the original message"); + self.inner.try_send_message(real_msg).map_err(|e| { + match e { + TrySendError::Full(_) => TrySendError::Full(orig_msg), + TrySendError::Closed(_) => TrySendError::Closed(orig_msg), + } + }) + } + else { + // No message to send after intercepting + Ok(()) + } + } + else { + let orig_msg : OutgoingMessage = msg.into().try_into().expect("must be able to recover the original message"); + self.inner.try_send_message(orig_msg) + } + } + async fn send_messages(&mut self, msgs: T) where T: IntoIterator + Send, @@ -101,9 +140,14 @@ where fn send_unbounded_message(&mut self, msg: OutgoingMessage) { let msg = < - <>::Message as overseer::AssociateOutgoing - >::OutgoingMessages as From>::from(msg); - if let Some(msg) = self.message_filter.intercept_outgoing(msg) { + <>::Message as overseer::AssociateOutgoing + >::OutgoingMessages as From>::from(msg); + if self.message_filter.need_intercept_outgoing(&msg) { + if let Some(msg) = self.message_filter.intercept_outgoing(&msg) { + self.inner.send_unbounded_message(msg); + } + } + else { self.inner.send_unbounded_message(msg); } } diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index bc5f6f92aedbc..365b2f16ac21b 100644 --- 
a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -498,11 +498,4 @@ where msg => Some(msg), } } - - fn intercept_outgoing( - &self, - msg: overseer::CandidateValidationOutgoingMessages, - ) -> Option { - Some(msg) - } } diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index d497fa7607a16..e13ae63199ff0 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -11,8 +11,7 @@ futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } -metered = { package = "prioritized-metered-channel", version = "0.2.0" } - +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`. sc-service = { path = "../../../substrate/client/service" } sc-cli = { path = "../../../substrate/client/cli" } diff --git a/polkadot/node/network/bridge/src/metrics.rs b/polkadot/node/network/bridge/src/metrics.rs index bb90daad56761..083a2a71aa0f8 100644 --- a/polkadot/node/network/bridge/src/metrics.rs +++ b/polkadot/node/network/bridge/src/metrics.rs @@ -105,9 +105,27 @@ impl Metrics { pub fn on_report_event(&self) { if let Some(metrics) = self.0.as_ref() { + self.on_message("report_peer"); metrics.report_events.inc() } } + + pub fn on_message(&self, message_type: &'static str) { + if let Some(metrics) = self.0.as_ref() { + metrics.messages_sent.with_label_values(&[message_type]).inc() + } + } + + pub fn on_delayed_rx_queue(&self, queue_size: usize) { + if let Some(metrics) = self.0.as_ref() { + metrics.rx_delayed_processing.observe(queue_size as f64); + } + } + pub fn time_delayed_rx_events( + &self, + ) -> Option { + self.0.as_ref().map(|metrics| metrics.rx_delayed_processing_time.start_timer()) + } } #[derive(Clone)] @@ -123,6 +141,13 @@ pub(crate) struct MetricsInner { bytes_received: prometheus::CounterVec, bytes_sent: prometheus::CounterVec, + + messages_sent: prometheus::CounterVec, + // The reason why a `Histogram` is used to track a queue size is that + // we need not only an average size of the queue (that will be 0 normally), but + // we also need a dynamics for this queue size in case of messages delays. 
+ rx_delayed_processing: prometheus::Histogram, + rx_delayed_processing_time: prometheus::Histogram, } impl metrics::Metrics for Metrics { @@ -217,6 +242,34 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + messages_sent: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_messages_sent_total", + "The number of messages sent via network bridge", + ), + &["type"] + )?, + registry, + )?, + rx_delayed_processing: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_network_bridge_rx_delayed", + "Number of events being delayed while broadcasting from the network bridge", + ).buckets(vec![0.0, 1.0, 2.0, 8.0, 16.0]), + )?, + registry, + )?, + rx_delayed_processing_time: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_network_bridge_rx_delayed_time", + "Time spent for waiting of the delayed events", + ), + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs index 4f21212dcb64a..823e1254612f8 100644 --- a/polkadot/node/network/bridge/src/network.rs +++ b/polkadot/node/network/bridge/src/network.rs @@ -61,6 +61,7 @@ pub(crate) fn send_message( let message = { let encoded = message.encode(); metrics.on_notification_sent(peer_set, version, encoded.len(), peers.len()); + metrics.on_message(std::any::type_name::()); encoded }; diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 51d248ca2d49f..e1125ebc904de 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -20,7 +20,10 @@ use super::*; use always_assert::never; use bytes::Bytes; -use futures::stream::BoxStream; +use futures::{ + future::BoxFuture, + stream::{BoxStream, FuturesUnordered, StreamExt}, +}; use parity_scale_codec::{Decode, DecodeAll}; use sc_network::Event as NetworkEvent; @@ -244,6 +247,7 @@ where NetworkBridgeEvent::PeerViewChange(peer, View::default()), ], &mut sender, + &metrics, ) .await; @@ -352,6 +356,7 @@ where dispatch_validation_event_to_all( NetworkBridgeEvent::PeerDisconnected(peer), &mut sender, + &metrics, ) .await, PeerSet::Collation => @@ -490,7 +495,7 @@ where network_service.report_peer(remote, report.into()); } - dispatch_validation_events_to_all(events, &mut sender).await; + dispatch_validation_events_to_all(events, &mut sender, &metrics).await; } if !c_messages.is_empty() { @@ -992,8 +997,9 @@ fn send_collation_message_vstaging( async fn dispatch_validation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl overseer::NetworkBridgeRxSenderTrait, + metrics: &Metrics, ) { - dispatch_validation_events_to_all(std::iter::once(event), ctx).await + dispatch_validation_events_to_all(std::iter::once(event), ctx, metrics).await } async fn dispatch_collation_event_to_all( @@ -1038,20 +1044,65 @@ fn dispatch_collation_event_to_all_unbounded( } } +fn send_or_queue_validation_event( + event: E, + sender: &mut Sender, + delayed_queue: &FuturesUnordered>, +) where + E: Send + 'static, + Sender: overseer::NetworkBridgeRxSenderTrait + overseer::SubsystemSender, +{ + match sender.try_send_message(event) { + Ok(()) => {}, + Err(overseer::TrySendError::Full(event)) => { + let mut sender = sender.clone(); + delayed_queue.push(Box::pin(async move { + sender.send_message(event).await; + })); + }, + Err(overseer::TrySendError::Closed(_)) => { + 
panic!( + "NetworkBridgeRxSender is closed when trying to send event of type: {}", + std::any::type_name::() + ); + }, + } +} + async fn dispatch_validation_events_to_all( events: I, sender: &mut impl overseer::NetworkBridgeRxSenderTrait, + metrics: &Metrics, ) where I: IntoIterator>, I::IntoIter: Send, { + let delayed_messages: FuturesUnordered> = FuturesUnordered::new(); + + // Fast path for sending events to subsystems, if any subsystem's queue is full, we hold + // the slow path future in the `delayed_messages` queue. for event in events { - sender - .send_messages(event.focus().map(StatementDistributionMessage::from)) - .await; - sender.send_messages(event.focus().map(BitfieldDistributionMessage::from)).await; - sender.send_messages(event.focus().map(ApprovalDistributionMessage::from)).await; - sender.send_messages(event.focus().map(GossipSupportMessage::from)).await; + if let Ok(msg) = event.focus().map(StatementDistributionMessage::from) { + send_or_queue_validation_event(msg, sender, &delayed_messages); + } + if let Ok(msg) = event.focus().map(BitfieldDistributionMessage::from) { + send_or_queue_validation_event(msg, sender, &delayed_messages); + } + if let Ok(msg) = event.focus().map(ApprovalDistributionMessage::from) { + send_or_queue_validation_event(msg, sender, &delayed_messages); + } + if let Ok(msg) = event.focus().map(GossipSupportMessage::from) { + send_or_queue_validation_event(msg, sender, &delayed_messages); + } + } + + let delayed_messages_count = delayed_messages.len(); + metrics.on_delayed_rx_queue(delayed_messages_count); + + if delayed_messages_count > 0 { + // Here we wait for all the delayed messages to be sent. + let _timer = metrics.time_delayed_rx_events(); // Dropped after `await` is completed + let _: Vec<()> = delayed_messages.collect().await; } } diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index 1b386ce1239bd..7fa1149593cab 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -33,6 +33,7 @@ use polkadot_node_subsystem::{ /// /// To be passed to [`FullNetworkConfiguration::add_notification_protocol`](). 
pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority}; +use polkadot_node_network_protocol::request_response::Requests; use sc_network::ReputationChange; use crate::validator_discovery; @@ -290,6 +291,20 @@ where ); for req in reqs { + match req { + Requests::ChunkFetchingV1(_) => metrics.on_message("chunk_fetching_v1"), + Requests::AvailableDataFetchingV1(_) => + metrics.on_message("available_data_fetching_v1"), + Requests::CollationFetchingV1(_) => metrics.on_message("collation_fetching_v1"), + Requests::CollationFetchingVStaging(_) => + metrics.on_message("collation_fetching_vstaging"), + Requests::PoVFetchingV1(_) => metrics.on_message("pov_fetching_v1"), + Requests::DisputeSendingV1(_) => metrics.on_message("dispute_sending_v1"), + Requests::StatementFetchingV1(_) => metrics.on_message("statement_fetching_v1"), + Requests::AttestedCandidateVStaging(_) => + metrics.on_message("attested_candidate_vstaging"), + } + network_service .start_request( &mut authority_discovery_service, diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index 0efd4d4c6ca84..5d41407ef83a8 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -16,7 +16,7 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-node-metrics = { path = "../metrics" } polkadot-primitives = { path = "../../primitives" } -orchestra = "0.0.5" +orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } gum = { package = "tracing-gum", path = "../gum" } schnellru = "0.2.1" sp-core = { path = "../../../substrate/primitives/core" } @@ -24,7 +24,7 @@ async-trait = "0.1.57" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } [dev-dependencies] -metered = { package = "prioritized-metered-channel", version = "0.2.0" } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } futures = { version = "0.3.21", features = ["thread-pool"] } femme = "2.2.1" @@ -36,7 +36,8 @@ node-test-helpers = { package = "polkadot-node-subsystem-test-helpers", path = " tikv-jemalloc-ctl = "0.5.0" [features] -default = [] -expand = [ "orchestra/expand" ] +default = [ "futures_channel" ] dotgraph = [ "orchestra/dotgraph" ] +expand = [ "orchestra/expand" ] +futures_channel = [ "metered/futures_channel", "orchestra/futures_channel" ] jemalloc-allocator = [ "dep:tikv-jemalloc-ctl" ] diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 7337f1e6be7cf..84d5d19c3b93c 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -107,7 +107,7 @@ pub use orchestra::{ contextbounds, orchestra, subsystem, FromOrchestra, MapSubsystem, MessagePacket, OrchestraError as OverseerError, SignalsReceived, Spawner, Subsystem, SubsystemContext, SubsystemIncomingMessages, SubsystemInstance, SubsystemMeterReadouts, SubsystemMeters, - SubsystemSender, TimeoutExt, ToOrchestra, + SubsystemSender, TimeoutExt, ToOrchestra, TrySendError, }; /// Store 2 days worth of blocks, not accounting for forks, diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 298783f418051..c17613fb7ea5d 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -1074,6 +1074,7 @@ fn overseer_all_subsystems_receive_signals_and_messages() { #[test] fn 
context_holds_onto_message_until_enough_signals_received() { + const CHANNEL_CAPACITY: usize = 64; let (candidate_validation_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (candidate_backing_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (statement_distribution_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); diff --git a/polkadot/node/subsystem-test-helpers/src/lib.rs b/polkadot/node/subsystem-test-helpers/src/lib.rs index fe6b106bf46e4..3f92513498c41 100644 --- a/polkadot/node/subsystem-test-helpers/src/lib.rs +++ b/polkadot/node/subsystem-test-helpers/src/lib.rs @@ -20,7 +20,7 @@ use polkadot_node_subsystem::{ messages::AllMessages, overseer, FromOrchestra, OverseerSignal, SpawnGlue, SpawnedSubsystem, - SubsystemError, SubsystemResult, + SubsystemError, SubsystemResult, TrySendError, }; use polkadot_node_subsystem_util::TimeoutExt; @@ -160,6 +160,14 @@ where self.tx.send(msg.into()).await.expect("test overseer no longer live"); } + fn try_send_message( + &mut self, + msg: OutgoingMessage, + ) -> Result<(), TrySendError> { + self.tx.unbounded_send(msg.into()).expect("test overseer no longer live"); + Ok(()) + } + async fn send_messages(&mut self, msgs: I) where I: IntoIterator + Send, diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index f6965cf647c34..a1c00cb0652e2 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -14,7 +14,7 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-statement-table = { path = "../../statement-table" } polkadot-node-jaeger = { path = "../jaeger" } -orchestra = "0.0.5" +orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } sc-network = { path = "../../../substrate/client/network" } sp-api = { path = "../../../substrate/primitives/api" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index 0d5ae7a0e8e6c..d9364e2c2c0f1 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -29,7 +29,7 @@ polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } polkadot-overseer = { path = "../overseer" } -metered = { package = "prioritized-metered-channel", version = "0.2.0" } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } From c879d1d5827f1fa4f72621ce185fa08ac3ea8d31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 11 Sep 2023 22:45:18 +0200 Subject: [PATCH 05/16] contracts: Run start function (#1367) Fixes #116 Start function wasn't allowed in a contract. Now it is allowed and is being run. It was disallowed because it is not used by Rust and supporting it made the code more complex. However, not running the start function violates the wasm standard. This makes life harder for some languages (see linked ticket). 
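For illustration, here is a minimal sketch of the flow this change enables, written against the `wasmi` and `wat` crates directly (the standalone setup and the tiny module below are assumptions for demonstration only; the pallet drives the same `linker.instantiate(..)` followed by `InstancePre::start(..)` sequence internally):

```rust
use wasmi::{Engine, Linker, Module, Store};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A module with a `start` function that must run before any export is called.
    let wasm = wat::parse_str(
        r#"
        (module
            (global $init (mut i32) (i32.const 0))
            (start $start)
            (func $start (global.set $init (i32.const 1)))
            (func (export "call") (result i32) (global.get $init))
        )
        "#,
    )?;

    let engine = Engine::default();
    let module = Module::new(&engine, &wasm[..])?;
    let mut store = Store::new(&engine, ());
    let linker = Linker::<()>::new(&engine);

    // `instantiate` yields an `InstancePre`: the start function has NOT run yet.
    let instance_pre = linker.instantiate(&mut store, &module)?;

    // Running `start` (previously rejected via `ensure_no_start`) executes the
    // start section and returns the usable `Instance`.
    let instance = instance_pre.start(&mut store)?;

    // Only now is the export looked up and called, observing the side effects
    // of the start function.
    let call = instance.get_typed_func::<(), i32>(&store, "call")?;
    assert_eq!(call.call(&mut store, ())?, 1);
    Ok(())
}
```

If the start function traps, for example by running out of fuel as exercised by the new `run_out_of_gas_start_fn` fixture, `start` returns an error before any export can be looked up; the pallet therefore routes both outcomes through the same gas-settling path.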
--- .../fixtures/run_out_of_gas_start_fn.wat | 10 + .../contracts/src/benchmarking/sandbox.rs | 8 +- substrate/frame/contracts/src/exec.rs | 111 ++++----- substrate/frame/contracts/src/lib.rs | 8 +- substrate/frame/contracts/src/tests.rs | 21 ++ substrate/frame/contracts/src/wasm/mod.rs | 212 +++++++++--------- 6 files changed, 198 insertions(+), 172 deletions(-) create mode 100644 substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat diff --git a/substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat b/substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat new file mode 100644 index 0000000000000..6591d7ede78c2 --- /dev/null +++ b/substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat @@ -0,0 +1,10 @@ +(module + (import "env" "memory" (memory 1 1)) + (start $start) + (func $start + (loop $inf (br $inf)) ;; just run out of gas + (unreachable) + ) + (func (export "call")) + (func (export "deploy")) +) diff --git a/substrate/frame/contracts/src/benchmarking/sandbox.rs b/substrate/frame/contracts/src/benchmarking/sandbox.rs index 34974b02ea0c4..c3abbcad5f2b3 100644 --- a/substrate/frame/contracts/src/benchmarking/sandbox.rs +++ b/substrate/frame/contracts/src/benchmarking/sandbox.rs @@ -58,7 +58,13 @@ impl From<&WasmModule> for Sandbox { .add_fuel(u64::MAX) .expect("We've set up engine to fuel consuming mode; qed"); - let entry_point = instance.get_export(&store, "call").unwrap().into_func().unwrap(); + let entry_point = instance + .start(&mut store) + .unwrap() + .get_export(&store, "call") + .unwrap() + .into_func() + .unwrap(); Self { entry_point, store } } } diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index fdb30310ef705..f93e7a2b21a55 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -21,7 +21,7 @@ use crate::{ storage::{self, meter::Diff, WriteOutcome}, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, DebugBufferVec, Determinism, Error, Event, Nonce, Origin, Pallet as Contracts, Schedule, - WasmBlob, LOG_TARGET, + LOG_TARGET, }; use frame_support::{ crypto::ecdsa::ECDSAExt, @@ -318,6 +318,22 @@ pub trait Ext: sealing::Sealed { /// Returns a nonce that is incremented for every instantiated contract. fn nonce(&mut self) -> u64; + /// Increment the reference count of a of a stored code by one. + /// + /// # Errors + /// + /// [`Error::CodeNotFound`] is returned if no stored code found having the specified + /// `code_hash`. + fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError>; + + /// Decrement the reference count of a stored code by one. + /// + /// # Note + /// + /// A contract whose reference count dropped to zero isn't automatically removed. A + /// `remove_code` transaction must be submitted by the original uploader to do so. + fn decrement_refcount(code_hash: CodeHash); + /// Adds a delegate dependency to [`ContractInfo`]'s `delegate_dependencies` field. /// /// This ensures that the delegated contract is not removed while it is still in use. It @@ -381,22 +397,6 @@ pub trait Executable: Sized { gas_meter: &mut GasMeter, ) -> Result; - /// Increment the reference count of a of a stored code by one. - /// - /// # Errors - /// - /// [`Error::CodeNotFound`] is returned if no stored code found having the specified - /// `code_hash`. - fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError>; - - /// Decrement the reference count of a stored code by one. 
- /// - /// # Note - /// - /// A contract whose reference count dropped to zero isn't automatically removed. A - /// `remove_code` transaction must be submitted by the original uploader to do so. - fn decrement_refcount(code_hash: CodeHash); - /// Execute the specified exported function and return the result. /// /// When the specified function is `Constructor` the executable is stored and its @@ -1285,10 +1285,10 @@ where info.queue_trie_for_deletion(); ContractInfoOf::::remove(&frame.account_id); - E::decrement_refcount(info.code_hash); + Self::decrement_refcount(info.code_hash); for (code_hash, deposit) in info.delegate_dependencies() { - E::decrement_refcount(*code_hash); + Self::decrement_refcount(*code_hash); frame .nested_storage .charge_deposit(frame.account_id.clone(), StorageDeposit::Refund(*deposit)); @@ -1491,8 +1491,8 @@ where frame.nested_storage.charge_deposit(frame.account_id.clone(), deposit); - E::increment_refcount(hash)?; - E::decrement_refcount(prev_hash); + Self::increment_refcount(hash)?; + Self::decrement_refcount(prev_hash); Contracts::::deposit_event( vec![T::Hashing::hash_of(&frame.account_id), hash, prev_hash], Event::ContractCodeUpdated { @@ -1525,6 +1525,25 @@ where } } + fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { + >::mutate(code_hash, |existing| -> Result<(), DispatchError> { + if let Some(info) = existing { + *info.refcount_mut() = info.refcount().saturating_add(1); + Ok(()) + } else { + Err(Error::::CodeNotFound.into()) + } + }) + } + + fn decrement_refcount(code_hash: CodeHash) { + >::mutate(code_hash, |existing| { + if let Some(info) = existing { + *info.refcount_mut() = info.refcount().saturating_sub(1); + } + }); + } + fn add_delegate_dependency( &mut self, code_hash: CodeHash, @@ -1537,7 +1556,7 @@ where let deposit = T::CodeHashLockupDepositPercent::get().mul_ceil(code_info.deposit()); info.add_delegate_dependency(code_hash, deposit)?; - >::increment_refcount(code_hash)?; + Self::increment_refcount(code_hash)?; frame .nested_storage .charge_deposit(frame.account_id.clone(), StorageDeposit::Charge(deposit)); @@ -1552,8 +1571,7 @@ where let info = frame.contract_info.get(&frame.account_id); let deposit = info.remove_delegate_dependency(code_hash)?; - >::decrement_refcount(*code_hash); - + Self::decrement_refcount(*code_hash); frame .nested_storage .charge_deposit(frame.account_id.clone(), StorageDeposit::Refund(deposit)); @@ -1600,11 +1618,7 @@ mod tests { use pallet_contracts_primitives::ReturnFlags; use pretty_assertions::assert_eq; use sp_runtime::{traits::Hash, DispatchError}; - use std::{ - cell::RefCell, - collections::hash_map::{Entry, HashMap}, - rc::Rc, - }; + use std::{cell::RefCell, collections::hash_map::HashMap, rc::Rc}; type System = frame_system::Pallet; @@ -1635,7 +1649,6 @@ mod tests { func_type: ExportedFunction, code_hash: CodeHash, code_info: CodeInfo, - refcount: u64, } #[derive(Default, Clone)] @@ -1664,37 +1677,11 @@ mod tests { func_type, code_hash: hash, code_info: CodeInfo::::new(ALICE), - refcount: 1, }, ); hash }) } - - fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { - Loader::mutate(|loader| { - match loader.map.entry(code_hash) { - Entry::Vacant(_) => Err(>::CodeNotFound)?, - Entry::Occupied(mut entry) => entry.get_mut().refcount += 1, - } - Ok(()) - }) - } - - fn decrement_refcount(code_hash: CodeHash) { - use std::collections::hash_map::Entry::Occupied; - Loader::mutate(|loader| { - let mut entry = match loader.map.entry(code_hash) { - Occupied(e) => e, - _ => 
panic!("code_hash does not exist"), - }; - let refcount = &mut entry.get_mut().refcount; - *refcount -= 1; - if *refcount == 0 { - entry.remove(); - } - }); - } } impl Executable for MockExecutable { @@ -1707,14 +1694,6 @@ mod tests { }) } - fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { - MockLoader::increment_refcount(code_hash) - } - - fn decrement_refcount(code_hash: CodeHash) { - MockLoader::decrement_refcount(code_hash); - } - fn execute>( self, ext: &mut E, @@ -1722,7 +1701,7 @@ mod tests { input_data: Vec, ) -> ExecResult { if let &Constructor = function { - Self::increment_refcount(self.code_hash).unwrap(); + E::increment_refcount(self.code_hash).unwrap(); } // # Safety // @@ -1733,7 +1712,7 @@ mod tests { // The transmute is necessary because `execute` has to be generic over all // `E: Ext`. However, `MockExecutable` can't be generic over `E` as it would // constitute a cycle. - let ext = unsafe { std::mem::transmute(ext) }; + let ext = unsafe { mem::transmute(ext) }; if function == &self.func_type { (self.func)(MockCtx { ext, input_data }, &self) } else { diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index e22e4a3f9ff86..7d516fbe2496c 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -103,7 +103,9 @@ pub mod weights; #[cfg(test)] mod tests; use crate::{ - exec::{AccountIdOf, ErrorOrigin, ExecError, Executable, Key, MomentOf, Stack as ExecStack}, + exec::{ + AccountIdOf, ErrorOrigin, ExecError, Executable, Ext, Key, MomentOf, Stack as ExecStack, + }, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{CodeInfo, WasmBlob}, @@ -658,8 +660,8 @@ pub mod pallet { } else { return Err(>::ContractNotFound.into()) }; - >::increment_refcount(code_hash)?; - >::decrement_refcount(contract.code_hash); + >>::increment_refcount(code_hash)?; + >>::decrement_refcount(contract.code_hash); Self::deposit_event( vec![T::Hashing::hash_of(&dest), code_hash, contract.code_hash], Event::ContractCodeUpdated { diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 8d6c5c5ac728a..0fea2b1559509 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -862,6 +862,27 @@ fn deposit_event_max_value_limit() { }); } +// Fail out of fuel (ref_time weight) inside the start function. +#[test] +fn run_out_of_fuel_start_fun() { + let (wasm, _code_hash) = compile_module::("run_out_of_gas_start_fn").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + Weight::from_parts(1_000_000_000_000, u64::MAX), + None, + wasm, + vec![], + vec![], + ), + Error::::OutOfGas, + ); + }); +} + // Fail out of fuel (ref_time weight) in the engine. 
#[test] fn run_out_of_fuel_engine() { diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 5fc65e314ad9f..77e94b16777b0 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -49,7 +49,7 @@ use frame_support::{ use sp_core::Get; use sp_runtime::{DispatchError, RuntimeDebug}; use sp_std::prelude::*; -use wasmi::{Instance, Linker, Memory, MemoryType, StackLimits, Store}; +use wasmi::{InstancePre, Linker, Memory, MemoryType, StackLimits, Store}; const BYTES_PER_PAGE: usize = 64 * 1024; @@ -164,7 +164,30 @@ impl WasmBlob { /// /// Applies all necessary checks before removing the code. pub fn remove(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult { - Self::try_remove_code(origin, code_hash) + >::try_mutate_exists(&code_hash, |existing| { + if let Some(code_info) = existing { + ensure!(code_info.refcount == 0, >::CodeInUse); + ensure!(&code_info.owner == origin, BadOrigin); + let _ = T::Currency::release( + &HoldReason::CodeUploadDepositReserve.into(), + &code_info.owner, + code_info.deposit, + BestEffort, + ); + let deposit_released = code_info.deposit; + let remover = code_info.owner.clone(); + + *existing = None; + >::remove(&code_hash); + >::deposit_event( + vec![code_hash], + Event::CodeRemoved { code_hash, deposit_released, remover }, + ); + Ok(()) + } else { + Err(>::CodeNotFound.into()) + } + }) } /// Creates and returns an instance of the supplied code. @@ -179,7 +202,7 @@ impl WasmBlob { determinism: Determinism, stack_limits: StackLimits, allow_deprecated: AllowDeprecatedInterface, - ) -> Result<(Store, Memory, Instance), &'static str> + ) -> Result<(Store, Memory, InstancePre), &'static str> where E: Environment, { @@ -217,9 +240,7 @@ impl WasmBlob { let instance = linker .instantiate(&mut store, &contract.module) - .map_err(|_| "can't instantiate module with provided definitions")? - .ensure_no_start(&mut store) - .map_err(|_| "start function is forbidden but found in the module")?; + .map_err(|_| "can't instantiate module with provided definitions")?; Ok((store, memory, instance)) } @@ -261,45 +282,6 @@ impl WasmBlob { }) } - /// Try to remove code together with all associated information. - fn try_remove_code(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult { - >::try_mutate_exists(&code_hash, |existing| { - if let Some(code_info) = existing { - ensure!(code_info.refcount == 0, >::CodeInUse); - ensure!(&code_info.owner == origin, BadOrigin); - let _ = T::Currency::release( - &HoldReason::CodeUploadDepositReserve.into(), - &code_info.owner, - code_info.deposit, - BestEffort, - ); - let deposit_released = code_info.deposit; - let remover = code_info.owner.clone(); - - *existing = None; - >::remove(&code_hash); - >::deposit_event( - vec![code_hash], - Event::CodeRemoved { code_hash, deposit_released, remover }, - ); - Ok(()) - } else { - Err(>::CodeNotFound.into()) - } - }) - } - - /// Load code with the given code hash. - fn load_code( - code_hash: CodeHash, - gas_meter: &mut GasMeter, - ) -> Result<(CodeVec, CodeInfo), DispatchError> { - let code_info = >::get(code_hash).ok_or(Error::::CodeNotFound)?; - gas_meter.charge(CodeLoadToken(code_info.code_len))?; - let code = >::get(code_hash).ok_or(Error::::CodeNotFound)?; - Ok((code, code_info)) - } - /// Create the module without checking the passed code. /// /// # Note @@ -318,12 +300,6 @@ impl WasmBlob { } impl CodeInfo { - /// Return the refcount of the module. 
- #[cfg(test)] - pub fn refcount(&self) -> u64 { - self.refcount - } - #[cfg(test)] pub fn new(owner: T::AccountId) -> Self { CodeInfo { @@ -335,6 +311,16 @@ impl CodeInfo { } } + /// Returns reference count of the module. + pub fn refcount(&self) -> u64 { + self.refcount + } + + /// Return mutable reference to the refcount of the module. + pub fn refcount_mut(&mut self) -> &mut u64 { + &mut self.refcount + } + /// Returns the deposit of the module. pub fn deposit(&self) -> BalanceOf { self.deposit @@ -346,29 +332,12 @@ impl Executable for WasmBlob { code_hash: CodeHash, gas_meter: &mut GasMeter, ) -> Result { - let (code, code_info) = Self::load_code(code_hash, gas_meter)?; + let code_info = >::get(code_hash).ok_or(Error::::CodeNotFound)?; + gas_meter.charge(CodeLoadToken(code_info.code_len))?; + let code = >::get(code_hash).ok_or(Error::::CodeNotFound)?; Ok(Self { code, code_info, code_hash }) } - fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { - >::mutate(code_hash, |existing| -> Result<(), DispatchError> { - if let Some(info) = existing { - info.refcount = info.refcount.saturating_add(1); - Ok(()) - } else { - Err(Error::::CodeNotFound.into()) - } - }) - } - - fn decrement_refcount(code_hash: CodeHash) { - >::mutate(code_hash, |existing| { - if let Some(info) = existing { - info.refcount = info.refcount.saturating_sub(1); - } - }); - } - fn execute>( self, ext: &mut E, @@ -410,25 +379,38 @@ impl Executable for WasmBlob { .add_fuel(fuel_limit) .expect("We've set up engine to fuel consuming mode; qed"); - let exported_func = instance - .get_export(&store, function.identifier()) - .and_then(|export| export.into_func()) - .ok_or_else(|| { - log::error!(target: LOG_TARGET, "failed to find entry point"); - Error::::CodeRejected - })?; + // Sync this frame's gas meter with the engine's one. + let process_result = |mut store: Store>, result| { + let engine_consumed_total = + store.fuel_consumed().expect("Fuel metering is enabled; qed"); + let gas_meter = store.data_mut().ext().gas_meter_mut(); + gas_meter.charge_fuel(engine_consumed_total)?; + store.into_data().to_execution_result(result) + }; + // Start function should already see the correct refcount in case it will be ever inspected. if let &ExportedFunction::Constructor = function { - WasmBlob::::increment_refcount(self.code_hash)?; + E::increment_refcount(self.code_hash)?; } - let result = exported_func.call(&mut store, &[], &mut []); - let engine_consumed_total = store.fuel_consumed().expect("Fuel metering is enabled; qed"); - // Sync this frame's gas meter with the engine's one. - let gas_meter = store.data_mut().ext().gas_meter_mut(); - gas_meter.charge_fuel(engine_consumed_total)?; - - store.into_data().to_execution_result(result) + // Any abort in start function (includes `return` + `terminate`) will make us skip the + // call into the subsequent exported function. This means that calling `return` returns data + // from the whole contract execution. 
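+		// `instance.start` below runs the module's start function, if one is defined; its
+		// fuel usage and any trap it raises are routed through the same `process_result`
+		// path as the exported function, keeping gas accounting identical for both outcomes.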
+ match instance.start(&mut store) { + Ok(instance) => { + let exported_func = instance + .get_export(&store, function.identifier()) + .and_then(|export| export.into_func()) + .ok_or_else(|| { + log::error!(target: LOG_TARGET, "failed to find entry point"); + Error::::CodeRejected + })?; + + let result = exported_func.call(&mut store, &[], &mut []); + process_result(store, result) + }, + Err(err) => process_result(store, Err(err)), + } } fn code_hash(&self) -> &CodeHash { @@ -740,7 +722,10 @@ mod tests { fn nonce(&mut self) -> u64 { 995 } - + fn increment_refcount(_code_hash: CodeHash) -> Result<(), DispatchError> { + Ok(()) + } + fn decrement_refcount(_code_hash: CodeHash) {} fn add_delegate_dependency( &mut self, code: CodeHash, @@ -748,7 +733,6 @@ mod tests { self.delegate_dependencies.borrow_mut().insert(code); Ok(()) } - fn remove_delegate_dependency( &mut self, code: &CodeHash, @@ -790,11 +774,20 @@ mod tests { executable.execute(ext.borrow_mut(), entry_point, input_data) } - /// Execute the supplied code. + /// Execute the `call` function within the supplied code. fn execute>(wat: &str, input_data: Vec, ext: E) -> ExecResult { execute_internal(wat, input_data, ext, &ExportedFunction::Call, true, false) } + /// Execute the `deploy` function within the supplied code. + fn execute_instantiate>( + wat: &str, + input_data: Vec, + ext: E, + ) -> ExecResult { + execute_internal(wat, input_data, ext, &ExportedFunction::Constructor, true, false) + } + /// Execute the supplied code with disabled unstable functions. /// /// In our test config unstable functions are disabled so that we can test them. @@ -1878,32 +1871,47 @@ mod tests { assert_ok!(execute(CODE_VALUE_TRANSFERRED, vec![], MockExt::default())); } - const START_FN_ILLEGAL: &str = r#" + const START_FN_DOES_RUN: &str = r#" (module - (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "seal_deposit_event" (func $seal_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) (start $start) (func $start - (unreachable) + (call $seal_deposit_event + (i32.const 0) ;; Pointer to the start of topics buffer + (i32.const 0) ;; The length of the topics buffer. 
+			(i32.const 0)	;; Pointer to the start of the data buffer
+			(i32.const 13)	;; Length of the buffer
+		)
	)

-	(func (export "call")
-		(unreachable)
-	)
+	(func (export "call"))

-	(func (export "deploy")
-		(unreachable)
-	)
+	(func (export "deploy"))

-	(data (i32.const 8) "\01\02\03\04")
+	(data (i32.const 0) "\00\01\2A\00\00\00\00\00\00\00\E5\14\00")
)
"#;

 #[test]
-fn start_fn_illegal() {
-	let output = execute(START_FN_ILLEGAL, vec![], MockExt::default());
-	assert_err!(output, <Error<Test>>::CodeRejected,);
+fn start_fn_does_run_on_call() {
+	let mut ext = MockExt::default();
+	execute(START_FN_DOES_RUN, vec![], &mut ext).unwrap();
+	assert_eq!(
+		ext.events[0].1,
+		[0x00_u8, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00]
+	);
+}
+
+#[test]
+fn start_fn_does_run_on_deploy() {
+	let mut ext = MockExt::default();
+	execute_instantiate(START_FN_DOES_RUN, vec![], &mut ext).unwrap();
+	assert_eq!(
+		ext.events[0].1,
+		[0x00_u8, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00]
+	);
 }

 const CODE_TIMESTAMP_NOW: &str = r#"

From 77c867bafff800488af43fa7e3602b914c062876 Mon Sep 17 00:00:00 2001
From: Liam Aharon
Date: Tue, 12 Sep 2023 16:13:27 +1000
Subject: [PATCH 06/16] fix simple analytics injection script (#1501)

When `process_file` is run by `xargs`, it is executed inside a new shell
without access to variables defined outside of its scope. This resulted in
`script_content` being an empty string. By exporting `script_content` prior
to running `xargs` it is available inside the new shells.

---
 .gitlab/pipeline/build.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml
index 328b37af1d4de..924d648e3f989 100644
--- a/.gitlab/pipeline/build.yml
+++ b/.gitlab/pipeline/build.yml
@@ -111,6 +111,9 @@ build-rustdoc:
           sed -i "s| |$script_content |" "$file"
         }
         export -f process_file
+        # xargs runs process_file in separate shells without access to outer variables.
+        # To make script_content available inside process_file, export it as an env var here.
+        export script_content
         # Modify .html files in parallel using xargs, otherwise it can take a long time.
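        # `_` becomes $0 of the inline bash invocation below; each file path substituted
        # for {} is then forwarded to process_file via "$@".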
find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {} From d08170278bc795a322fbe21368bff84145b8e467 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Tue, 12 Sep 2023 09:49:48 +0300 Subject: [PATCH 07/16] Additional logging for `dispute-coordinator` (#1494) --- .../dispute-coordinator/src/initialized.rs | 7 +++++++ .../src/participation/mod.rs | 19 +++++++++++++++++++ .../src/participation/queues/mod.rs | 10 ++++++++++ 3 files changed, 36 insertions(+) diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 9bfca2d81a047..9cd544a8c5362 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -301,6 +301,13 @@ impl Initialized { self.participation.process_active_leaves_update(ctx, &update).await?; if let Some(new_leaf) = update.activated { + gum::trace!( + target: LOG_TARGET, + leaf_hash = ?new_leaf.hash, + block_number = new_leaf.number, + "Processing ActivatedLeaf" + ); + let session_idx = self.runtime_info.get_session_index_for_child(ctx.sender(), new_leaf.hash).await; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index 5a3c4be90aa01..35d07b411e8f6 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -260,6 +260,13 @@ impl Participation { req: ParticipationRequest, recent_head: Hash, ) -> FatalResult<()> { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?req.candidate_hash(), + session = req.session(), + "Forking participation" + ); + let participation_timer = self.metrics.time_participation(); if self.running_participations.insert(*req.candidate_hash()) { let sender = ctx.sender().clone(); @@ -314,12 +321,24 @@ async fn participate( }, Ok(Ok(data)) => data, Ok(Err(RecoveryError::Invalid)) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?req.candidate_hash(), + session = req.session(), + "Invalid availability data during participation" + ); // the available data was recovered but it is invalid, therefore we'll // vote negatively for the candidate dispute send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await; return }, Ok(Err(RecoveryError::Unavailable)) | Ok(Err(RecoveryError::ChannelClosed)) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?req.candidate_hash(), + session = req.session(), + "Can't fetch availability data in participation" + ); send_result(&mut result_sender, req, ParticipationOutcome::Unavailable).await; return }, diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs index 1105135a747f2..d9e86def168c9 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -277,6 +277,11 @@ impl Queues { match self.priority.entry(comparator) { Entry::Occupied(_) => req.discard_timer(), Entry::Vacant(vac) => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?req.candidate_hash(), + "Added to priority participation queue" + ); vac.insert(req); }, } @@ -295,6 +300,11 @@ impl Queues { match self.best_effort.entry(comparator) { Entry::Occupied(_) => req.discard_timer(), Entry::Vacant(vac) => { + gum::trace!( + target: LOG_TARGET, + 
candidate_hash = ?req.candidate_hash(), + "Added to best effort participation queue" + ); vac.insert(req); }, } From f551f52e3554a18e910b5a98f16ee851b25e9911 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 17:02:39 +1000 Subject: [PATCH 08/16] Bump cfg-expr from 0.15.4 to 0.15.5 (#1502) Bumps [cfg-expr](https://github.com/EmbarkStudios/cfg-expr) from 0.15.4 to 0.15.5.
Release notes (sourced from cfg-expr's releases), Release 0.15.5:

Changed
  - PR#64 updated the builtin target list to 1.72.0. It also changed the MSRV to 1.70.0.

The changelog entry ([0.15.5] - 2023-09-08) records the same change.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- substrate/frame/support/procedural/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 686ee62add3aa..eb13ab4b76a93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2274,9 +2274,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40ccee03b5175c18cde8f37e7d2a33bcef6f8ec8f7cc0d81090d1bb380949c9" +checksum = "03915af431787e6ffdcc74c645077518c6b6e01f80b761e0fbbfa288536311b3" dependencies = [ "smallvec", ] diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index 07a7ed079a9ef..308384f03632d 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] derive-syn-parse = "0.1.5" Inflector = "0.11.4" -cfg-expr = "0.15.4" +cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" quote = "1.0.28" From 391591b6a4e45d49bfafca77a41d1b684c9d5efe Mon Sep 17 00:00:00 2001 From: Lulu Date: Tue, 12 Sep 2023 11:41:34 +0100 Subject: [PATCH 09/16] Fix mistakes in Cargo.toml (#1504) --- substrate/frame/contracts/Cargo.toml | 2 +- substrate/frame/tx-pause/Cargo.toml | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 237ab9303e250..d5c809e1bf7cf 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -9,7 +9,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for WASM contracts" readme = "README.md" -include = ["src/**/*", "README.md", "CHANGELOG.md"] +include = ["src/**/*", "build.rs", "README.md", "CHANGELOG.md"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 356693d90f04f..6d96cb8abe799 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -7,7 +7,6 @@ license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true description = "FRAME transaction pause pallet" -readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] From f5ca403af9cea41230b27d74c98b205d9ba023c9 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 12 Sep 2023 14:38:31 +0300 Subject: [PATCH 10/16] Report `tracing_unbounded` channel size to prometheus (#1489) --- substrate/client/utils/src/metrics.rs | 27 ++++++++++++++++++++++----- substrate/client/utils/src/mpsc.rs | 27 ++++++++++++++++++++------- 2 files changed, 42 insertions(+), 12 deletions(-) diff --git a/substrate/client/utils/src/metrics.rs b/substrate/client/utils/src/metrics.rs index 6bbdbe2e2e599..308e90cb25379 100644 --- a/substrate/client/utils/src/metrics.rs +++ b/substrate/client/utils/src/metrics.rs @@ -24,7 +24,10 @@ use prometheus::{ Error as PrometheusError, Registry, }; -use prometheus::{core::GenericCounterVec, Opts}; +use prometheus::{ + core::{GenericCounterVec, GenericGaugeVec}, + Opts, +}; lazy_static! { pub static ref TOKIO_THREADS_TOTAL: GenericCounter = @@ -36,18 +39,32 @@ lazy_static! { } lazy_static! 
{ - pub static ref UNBOUNDED_CHANNELS_COUNTER : GenericCounterVec = GenericCounterVec::new( - Opts::new("substrate_unbounded_channel_len", "Items in each mpsc::unbounded instance"), - &["entity", "action"] // 'name of channel, send|received|dropped + pub static ref UNBOUNDED_CHANNELS_COUNTER: GenericCounterVec = GenericCounterVec::new( + Opts::new( + "substrate_unbounded_channel_len", + "Items sent/received/dropped on each mpsc::unbounded instance" + ), + &["entity", "action"], // name of channel, send|received|dropped + ).expect("Creating of statics doesn't fail. qed"); + pub static ref UNBOUNDED_CHANNELS_SIZE: GenericGaugeVec = GenericGaugeVec::new( + Opts::new( + "substrate_unbounded_channel_size", + "Size (number of messages to be processed) of each mpsc::unbounded instance", + ), + &["entity"], // name of channel ).expect("Creating of statics doesn't fail. qed"); - } +pub static SENT_LABEL: &'static str = "send"; +pub static RECEIVED_LABEL: &'static str = "received"; +pub static DROPPED_LABEL: &'static str = "dropped"; + /// Register the statics to report to registry pub fn register_globals(registry: &Registry) -> Result<(), PrometheusError> { registry.register(Box::new(TOKIO_THREADS_ALIVE.clone()))?; registry.register(Box::new(TOKIO_THREADS_TOTAL.clone()))?; registry.register(Box::new(UNBOUNDED_CHANNELS_COUNTER.clone()))?; + registry.register(Box::new(UNBOUNDED_CHANNELS_SIZE.clone()))?; Ok(()) } diff --git a/substrate/client/utils/src/mpsc.rs b/substrate/client/utils/src/mpsc.rs index 039e03f9e6188..c24a5bd8904af 100644 --- a/substrate/client/utils/src/mpsc.rs +++ b/substrate/client/utils/src/mpsc.rs @@ -20,7 +20,9 @@ pub use async_channel::{TryRecvError, TrySendError}; -use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; +use crate::metrics::{ + DROPPED_LABEL, RECEIVED_LABEL, SENT_LABEL, UNBOUNDED_CHANNELS_COUNTER, UNBOUNDED_CHANNELS_SIZE, +}; use async_channel::{Receiver, Sender}; use futures::{ stream::{FusedStream, Stream}, @@ -102,7 +104,10 @@ impl TracingUnboundedSender { /// Proxy function to `async_channel::Sender::try_send`. pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { self.inner.try_send(msg).map(|s| { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, "send"]).inc(); + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, SENT_LABEL]).inc(); + UNBOUNDED_CHANNELS_SIZE + .with_label_values(&[self.name]) + .set(self.inner.len().saturated_into()); if self.inner.len() >= self.queue_size_warning && self.warning_fired @@ -140,7 +145,10 @@ impl TracingUnboundedReceiver { /// that discounts the messages taken out. 
pub fn try_recv(&mut self) -> Result { self.inner.try_recv().map(|s| { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, "received"]).inc(); + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, RECEIVED_LABEL]).inc(); + UNBOUNDED_CHANNELS_SIZE + .with_label_values(&[self.name]) + .set(self.inner.len().saturated_into()); s }) } @@ -155,14 +163,16 @@ impl Drop for TracingUnboundedReceiver { fn drop(&mut self) { // Close the channel to prevent any further messages to be sent into the channel self.close(); - // the number of messages about to be dropped + // The number of messages about to be dropped let count = self.inner.len(); - // discount the messages + // Discount the messages if count > 0 { UNBOUNDED_CHANNELS_COUNTER - .with_label_values(&[self.name, "dropped"]) + .with_label_values(&[self.name, DROPPED_LABEL]) .inc_by(count.saturated_into()); } + // Reset the size metric to 0 + UNBOUNDED_CHANNELS_SIZE.with_label_values(&[self.name]).set(0); // Drain all the pending messages in the channel since they can never be accessed, // this can be removed once https://github.com/smol-rs/async-channel/issues/23 is // resolved @@ -180,7 +190,10 @@ impl Stream for TracingUnboundedReceiver { match Pin::new(&mut s.inner).poll_next(cx) { Poll::Ready(msg) => { if msg.is_some() { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[s.name, "received"]).inc(); + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[s.name, RECEIVED_LABEL]).inc(); + UNBOUNDED_CHANNELS_SIZE + .with_label_values(&[s.name]) + .set(s.inner.len().saturated_into()); } Poll::Ready(msg) }, From fea7bcd6258a49676f70c8da0f8525de0b7b83d5 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 12 Sep 2023 14:07:09 +0200 Subject: [PATCH 11/16] [CI] Disable runtime logging for benchmarks (#1463) Changes: - Disable runtime logging in benchmarks by building with a specific profile --------- Signed-off-by: Oliver Tale-Yazdi --- .gitlab/pipeline/build.yml | 4 ++-- .gitlab/pipeline/short-benchmarks.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 924d648e3f989..3085d2230063a 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -146,7 +146,7 @@ build-short-benchmark: - .run-immediately - .collect-artifacts script: - - cargo build --profile release --locked --features=runtime-benchmarks --bin polkadot --workspace + - cargo build --profile release --locked --features=runtime-benchmarks,on-chain-release-build --bin polkadot --workspace - mkdir -p artifacts - target/release/polkadot --version - cp ./target/release/polkadot ./artifacts/ @@ -273,7 +273,7 @@ build-short-benchmark-cumulus: - .run-immediately - .collect-artifacts script: - - cargo build --profile release --locked --features=runtime-benchmarks -p polkadot-parachain-bin --bin polkadot-parachain + - cargo build --profile release --locked --features=runtime-benchmarks,on-chain-release-build -p polkadot-parachain-bin --bin polkadot-parachain --workspace - mkdir -p artifacts - target/release/polkadot-parachain --version - cp ./target/release/polkadot-parachain ./artifacts/ diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 81601fba32acf..7b7704ee66d8c 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -18,7 +18,7 @@ short-benchmark-polkadot: &short-bench tags: - benchmark script: - - ./artifacts/polkadot benchmark pallet --execution wasm --wasm-execution compiled --chain 
$RUNTIME-dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + - ./artifacts/polkadot benchmark pallet --chain $RUNTIME-dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 short-benchmark-kusama: <<: *short-bench @@ -45,7 +45,7 @@ short-benchmark-westend: tags: - benchmark script: - - ./artifacts/polkadot-parachain benchmark pallet --wasm-execution compiled --chain $RUNTIME_CHAIN --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + - ./artifacts/polkadot-parachain benchmark pallet --chain $RUNTIME_CHAIN --pallet "*" --extrinsic "*" --steps 2 --repeat 1 short-benchmark-asset-hub-polkadot: <<: *short-bench-cumulus From e005aef59b229cf173d0aa84b73a78af49860a85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 12 Sep 2023 14:12:10 +0200 Subject: [PATCH 12/16] Make the node version independent of the crate version (#1495) --- polkadot/cli/src/cli.rs | 11 +++++++++-- polkadot/cli/src/command.rs | 3 ++- polkadot/node/core/pvf/common/src/worker/mod.rs | 2 +- polkadot/src/bin/execute-worker.rs | 2 +- polkadot/src/bin/prepare-worker.rs | 2 +- substrate/utils/build-script-utils/src/version.rs | 1 + 6 files changed, 15 insertions(+), 6 deletions(-) diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 66205902b79df..aaf8f17057607 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -19,8 +19,15 @@ use clap::Parser; use std::path::PathBuf; -/// The version of the node. The passed-in version of the workers should match this. -pub const NODE_VERSION: &'static str = env!("SUBSTRATE_CLI_IMPL_VERSION"); +/// The version of the node. +/// +/// This is the version that is used for versioning this node binary. +/// By default the `minor` version is bumped in every release. `Major` or `patch` releases are only +/// expected in very rare cases. +/// +/// The worker binaries associated to the node binary should ensure that they are using the same +/// version as the main node that started them. +pub const NODE_VERSION: &'static str = "1.1.0"; #[allow(missing_docs)] #[derive(Debug, Parser)] diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index a2a00d0ebd3f8..a081e1b5181bf 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -55,7 +55,8 @@ impl SubstrateCli for Cli { } fn impl_version() -> String { - NODE_VERSION.into() + let commit_hash = env!("SUBSTRATE_CLI_COMMIT_HASH"); + format!("{NODE_VERSION}-{commit_hash}") } fn description() -> String { diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index a3f8e777c48b8..bcdf882f300c0 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -33,7 +33,7 @@ use tokio::{io, net::UnixStream, runtime::Runtime}; /// spawning the desired worker. #[macro_export] macro_rules! 
decl_worker_main { - ($expected_command:expr, $entrypoint:expr, $worker_version:expr) => { + ($expected_command:expr, $entrypoint:expr, $worker_version:expr $(,)*) => { fn print_help(expected_command: &str) { println!("{} {}", expected_command, $worker_version); println!(); diff --git a/polkadot/src/bin/execute-worker.rs b/polkadot/src/bin/execute-worker.rs index 72cab799d753e..1deb365809860 100644 --- a/polkadot/src/bin/execute-worker.rs +++ b/polkadot/src/bin/execute-worker.rs @@ -19,5 +19,5 @@ polkadot_node_core_pvf_common::decl_worker_main!( "execute-worker", polkadot_node_core_pvf_execute_worker::worker_entrypoint, - env!("SUBSTRATE_CLI_IMPL_VERSION") + polkadot_cli::NODE_VERSION, ); diff --git a/polkadot/src/bin/prepare-worker.rs b/polkadot/src/bin/prepare-worker.rs index 695f66cc7b7d3..d731f8a30d065 100644 --- a/polkadot/src/bin/prepare-worker.rs +++ b/polkadot/src/bin/prepare-worker.rs @@ -19,5 +19,5 @@ polkadot_node_core_pvf_common::decl_worker_main!( "prepare-worker", polkadot_node_core_pvf_prepare_worker::worker_entrypoint, - env!("SUBSTRATE_CLI_IMPL_VERSION") + polkadot_cli::NODE_VERSION, ); diff --git a/substrate/utils/build-script-utils/src/version.rs b/substrate/utils/build-script-utils/src/version.rs index 4ee5376ed322d..309c6d71d77b1 100644 --- a/substrate/utils/build-script-utils/src/version.rs +++ b/substrate/utils/build-script-utils/src/version.rs @@ -41,6 +41,7 @@ pub fn generate_cargo_keys() { } }; + println!("cargo:rustc-env=SUBSTRATE_CLI_COMMIT_HASH={commit}"); println!("cargo:rustc-env=SUBSTRATE_CLI_IMPL_VERSION={}", get_version(&commit)) } From 730edeb68c3d2c31c65861bba17aa5e540bbddc5 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 12 Sep 2023 16:22:39 +0300 Subject: [PATCH 13/16] add tests for `find_potential_parents` (#1338) (would have closed) https://github.com/paritytech/cumulus/issues/2831 --- cumulus/client/consensus/common/src/lib.rs | 2 + cumulus/client/consensus/common/src/tests.rs | 574 ++++++++++++++++++- 2 files changed, 553 insertions(+), 23 deletions(-) diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index 29ee7356ee24d..08bceabb2bd4a 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -211,6 +211,7 @@ pub trait ParachainBlockImportMarker {} impl ParachainBlockImportMarker for ParachainBlockImport {} /// Parameters when searching for suitable parents to build on top of. +#[derive(Debug)] pub struct ParentSearchParams { /// The relay-parent that is intended to be used. pub relay_parent: PHash, @@ -228,6 +229,7 @@ pub struct ParentSearchParams { } /// A potential parent block returned from [`find_potential_parents`] +#[derive(Debug, PartialEq)] pub struct PotentialParent { /// The hash of the block. 
pub hash: B::Hash, diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 15586d81d9bff..22d3dd3abd49d 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -19,7 +19,10 @@ use crate::*; use async_trait::async_trait; use codec::Encode; use cumulus_client_pov_recovery::RecoveryKind; -use cumulus_primitives_core::{relay_chain::BlockId, InboundDownwardMessage, InboundHrmpMessage}; +use cumulus_primitives_core::{ + relay_chain::{self, BlockId}, + CumulusDigestItem, InboundDownwardMessage, InboundHrmpMessage, +}; use cumulus_relay_chain_interface::{ CommittedCandidateReceipt, OccupiedCoreAssumption, OverseerHandle, PHeader, ParaId, RelayChainInterface, RelayChainResult, SessionIndex, StorageValue, ValidatorId, @@ -42,12 +45,21 @@ use std::{ time::Duration, }; +fn relay_block_num_from_hash(hash: &PHash) -> relay_chain::BlockNumber { + hash.to_low_u64_be() as u32 +} + +fn relay_hash_from_block_num(block_number: relay_chain::BlockNumber) -> PHash { + PHash::from_low_u64_be(block_number as u64) +} + struct RelaychainInner { new_best_heads: Option>, finalized_heads: Option>, new_best_heads_sender: mpsc::UnboundedSender
, finalized_heads_sender: mpsc::UnboundedSender
, relay_chain_hash_to_header: HashMap, + relay_chain_hash_to_header_pending: HashMap, } impl RelaychainInner { @@ -61,6 +73,7 @@ impl RelaychainInner { new_best_heads: Some(new_best_heads), finalized_heads: Some(finalized_heads), relay_chain_hash_to_header: Default::default(), + relay_chain_hash_to_header_pending: Default::default(), } } } @@ -110,20 +123,17 @@ impl RelayChainInterface for Relaychain { &self, hash: PHash, _: ParaId, - _: OccupiedCoreAssumption, + assumption: OccupiedCoreAssumption, ) -> RelayChainResult> { - Ok(Some(PersistedValidationData { - parent_head: self - .inner - .lock() - .unwrap() - .relay_chain_hash_to_header - .get(&hash) - .unwrap() - .encode() - .into(), - ..Default::default() - })) + let inner = self.inner.lock().unwrap(); + let relay_to_header = match assumption { + OccupiedCoreAssumption::Included => &inner.relay_chain_hash_to_header_pending, + _ => &inner.relay_chain_hash_to_header, + }; + let Some(parent_head) = relay_to_header.get(&hash).map(|head| head.encode().into()) else { + return Ok(None) + }; + Ok(Some(PersistedValidationData { parent_head, ..Default::default() })) } async fn candidate_pending_availability( @@ -135,7 +145,7 @@ impl RelayChainInterface for Relaychain { } async fn session_index_for_child(&self, _: PHash) -> RelayChainResult { - unimplemented!("Not needed for test") + Ok(0) } async fn import_notification_stream( @@ -210,8 +220,23 @@ impl RelayChainInterface for Relaychain { .boxed()) } - async fn header(&self, _block_id: BlockId) -> RelayChainResult> { - unimplemented!("Not needed for test") + async fn header(&self, block_id: BlockId) -> RelayChainResult> { + let number = match block_id { + BlockId::Hash(hash) => relay_block_num_from_hash(&hash), + BlockId::Number(block_number) => block_number, + }; + let parent_hash = number + .checked_sub(1) + .map(relay_hash_from_block_num) + .unwrap_or_else(|| PHash::zero()); + + Ok(Some(PHeader { + parent_hash, + number, + digest: sp_runtime::Digest::default(), + state_root: PHash::zero(), + extrinsics_root: PHash::zero(), + })) } } @@ -238,6 +263,7 @@ fn build_block( sproof: RelayStateSproofBuilder, at: Option, timestamp: Option, + relay_parent: Option, ) -> Block { let builder = match at { Some(at) => match timestamp { @@ -249,10 +275,17 @@ fn build_block( let mut block = builder.build().unwrap().block; - // Simulate some form of post activity (like a Seal or Other generic things). - // This is mostly used to exercise the `LevelMonitor` correct behavior. - // (in practice we want that header post-hash != pre-hash) - block.header.digest.push(sp_runtime::DigestItem::Other(vec![1, 2, 3])); + if let Some(relay_parent) = relay_parent { + block + .header + .digest + .push(CumulusDigestItem::RelayParent(relay_parent).to_digest_item()); + } else { + // Simulate some form of post activity (like a Seal or Other generic things). + // This is mostly used to exercise the `LevelMonitor` correct behavior. 
+ // (in practice we want that header post-hash != pre-hash) + block.header.digest.push(sp_runtime::DigestItem::Other(vec![1, 2, 3])); + } block } @@ -292,13 +325,14 @@ fn build_and_import_block_ext>( importer: &mut I, at: Option, timestamp: Option, + relay_parent: Option, ) -> Block { let sproof = match at { None => sproof_with_best_parent(client), Some(at) => sproof_with_parent_by_hash(client, at), }; - let block = build_block(client, sproof, at, timestamp); + let block = build_block(client, sproof, at, timestamp, relay_parent); import_block_sync(importer, block.clone(), origin, import_as_best); block } @@ -311,6 +345,7 @@ fn build_and_import_block(mut client: Arc, import_as_best: bool) -> Bloc &mut client, None, None, + None, ) } @@ -372,7 +407,7 @@ fn follow_new_best_with_dummy_recovery_works() { let header = client.header(best).ok().flatten().expect("No header for best"); sproof_with_parent(HeadData(header.encode())) }; - let block = build_block(&*client, sproof, None, None); + let block = build_block(&*client, sproof, None, None, None); let block_clone = block.clone(); let client_clone = client.clone(); @@ -638,6 +673,7 @@ fn prune_blocks_on_level_overflow() { &mut para_import, None, None, + None, ); let id0 = block0.header.hash(); @@ -650,6 +686,7 @@ fn prune_blocks_on_level_overflow() { &mut para_import, Some(id0), Some(i as u64 * TIMESTAMP_MULTIPLIER), + None, ) }) .collect::>(); @@ -664,6 +701,7 @@ fn prune_blocks_on_level_overflow() { &mut para_import, Some(id10), Some(i as u64 * TIMESTAMP_MULTIPLIER), + None, ) }) .collect::>(); @@ -692,6 +730,7 @@ fn prune_blocks_on_level_overflow() { &mut para_import, Some(id0), Some(LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER), + None, ); // Expected scenario @@ -711,6 +750,7 @@ fn prune_blocks_on_level_overflow() { &mut para_import, Some(id0), Some(2 * LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER), + None, ); // Expected scenario @@ -749,6 +789,7 @@ fn restore_limit_monitor() { &mut para_import, None, None, + None, ); let id00 = block00.header.hash(); @@ -761,6 +802,7 @@ fn restore_limit_monitor() { &mut para_import, Some(id00), Some(i as u64 * TIMESTAMP_MULTIPLIER), + None, ) }) .collect::>(); @@ -775,6 +817,7 @@ fn restore_limit_monitor() { &mut para_import, Some(id10), Some(i as u64 * TIMESTAMP_MULTIPLIER), + None, ) }) .collect::>(); @@ -809,6 +852,7 @@ fn restore_limit_monitor() { &mut para_import, Some(id00), Some(LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER), + None, ); // Expected scenario @@ -830,3 +874,487 @@ fn restore_limit_monitor() { })); assert_eq!(*monitor.freshness.get(&block13.header.hash()).unwrap(), monitor.import_counter); } + +#[test] +fn find_potential_parents_in_allowed_ancestry() { + sp_tracing::try_init_simple(); + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let included_map = &mut relay_chain.inner.lock().unwrap().relay_chain_hash_to_header; + included_map.insert(relay_parent, block.header().clone()); + } + + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 0, + max_depth: 0, + ignore_alternative_branches: true, 
+ }, + &*backend, + &relay_chain, + )) + .unwrap(); + assert_eq!(potential_parents.len(), 1); + let parent = &potential_parents[0]; + + assert_eq!(parent.hash, block.hash()); + assert_eq!(&parent.header, block.header()); + assert_eq!(parent.depth, 0); + assert!(parent.aligned_with_pending); + + // New block is not pending or included. + let block_relay_parent = relay_hash_from_block_num(11); + let search_relay_parent = relay_hash_from_block_num(13); + { + let included_map = &mut relay_chain.inner.lock().unwrap().relay_chain_hash_to_header; + included_map.insert(search_relay_parent, block.header().clone()); + } + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(block.header().hash()), + None, + Some(block_relay_parent), + ); + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 2, + max_depth: 1, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert_eq!(potential_parents.len(), 2); + let parent = &potential_parents[1]; + + assert_eq!(parent.hash, block.hash()); + assert_eq!(&parent.header, block.header()); + assert_eq!(parent.depth, 1); + assert!(parent.aligned_with_pending); + + // Reduce allowed ancestry. + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, + max_depth: 1, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + assert_eq!(potential_parents.len(), 1); + let parent = &potential_parents[0]; + assert_ne!(parent.hash, block.hash()); +} + +/// Tests that pending availability block is always potential parent. 
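+/// The pending block must be returned even though it is not yet included, since the
+/// parachain is expected to build on top of it once availability completes.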
+#[test] +fn find_potential_pending_parent() { + sp_tracing::try_init_simple(); + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + let relay_parent = relay_hash_from_block_num(12); + let pending_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + let search_relay_parent = relay_hash_from_block_num(15); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_block.header().clone()); + } + + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 0, + max_depth: 1, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + assert_eq!(potential_parents.len(), 2); + let included_parent = &potential_parents[0]; + + assert_eq!(included_parent.hash, included_block.hash()); + assert_eq!(&included_parent.header, included_block.header()); + assert_eq!(included_parent.depth, 0); + assert!(included_parent.aligned_with_pending); + + let pending_parent = &potential_parents[1]; + + assert_eq!(pending_parent.hash, pending_block.hash()); + assert_eq!(&pending_parent.header, pending_block.header()); + assert_eq!(pending_parent.depth, 1); + assert!(pending_parent.aligned_with_pending); +} + +#[test] +fn find_potential_parents_with_max_depth() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let included_map = &mut relay_chain.inner.lock().unwrap().relay_chain_hash_to_header; + included_map.insert(relay_parent, included_block.header().clone()); + } + + let mut blocks = Vec::new(); + let mut parent = included_block.header().hash(); + for _ in 0..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(parent), + None, + Some(relay_parent), + ); + parent = block.header().hash(); + blocks.push(block); + } + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 0, + max_depth, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + assert_eq!(potential_parents.len(), max_depth + 1); + let expected_parents: Vec<_> = + 
std::iter::once(&included_block).chain(blocks.iter().take(max_depth)).collect(); + + for i in 0..(max_depth + 1) { + let parent = &potential_parents[i]; + let expected = &expected_parents[i]; + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + assert_eq!(parent.depth, i); + assert!(parent.aligned_with_pending); + } + } +} + +#[test] +fn find_potential_parents_aligned_with_pending() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + let pending_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_block.header().clone()); + } + + // Build two sibling chains from the included block. + let mut aligned_blocks = Vec::new(); + let mut parent = pending_block.header().hash(); + for _ in 1..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(parent), + None, + Some(relay_parent), + ); + parent = block.header().hash(); + aligned_blocks.push(block); + } + + let mut alt_blocks = Vec::new(); + let mut parent = included_block.header().hash(); + for _ in 0..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(parent), + None, + Some(search_relay_parent), + ); + parent = block.header().hash(); + alt_blocks.push(block); + } + + // Ignore alternative branch: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + assert_eq!(potential_parents.len(), max_depth + 1); + let expected_parents: Vec<_> = [&included_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + + for i in 0..(max_depth + 1) { + let parent = &potential_parents[i]; + let expected = &expected_parents[i]; + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + assert_eq!(parent.depth, i); + assert!(parent.aligned_with_pending); + } + } + + // Do not ignore: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. 
+ max_depth, + ignore_alternative_branches: false, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + let expected_len = 2 * max_depth + 1; + assert_eq!(potential_parents.len(), expected_len); + let expected_aligned: Vec<_> = [&included_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + let expected_alt = alt_blocks.iter().take(max_depth); + + let expected_parents: Vec<_> = + expected_aligned.clone().into_iter().chain(expected_alt).collect(); + // Check correctness. + assert_eq!(expected_parents.len(), expected_len); + + for i in 0..expected_len { + let parent = &potential_parents[i]; + let expected = expected_parents + .iter() + .find(|block| block.header().hash() == parent.hash) + .expect("missing parent"); + + let is_aligned = expected_aligned.contains(&expected); + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + + assert_eq!(parent.aligned_with_pending, is_aligned); + } + } +} + +/// Tests that no potential parent gets discarded if there's no pending availability block. +#[test] +fn find_potential_parents_aligned_no_pending() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let included_map = &mut relay_chain.inner.lock().unwrap().relay_chain_hash_to_header; + included_map.insert(search_relay_parent, included_block.header().clone()); + } + + // Build two sibling chains from the included block. + let mut parent = included_block.header().hash(); + for _ in 0..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(parent), + None, + Some(relay_parent), + ); + parent = block.header().hash(); + } + + let mut parent = included_block.header().hash(); + for _ in 0..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(parent), + None, + Some(search_relay_parent), + ); + parent = block.header().hash(); + } + + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents_aligned = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. 
+ max_depth, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, + max_depth, + ignore_alternative_branches: false, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + assert_eq!(potential_parents.len(), 2 * max_depth + 1); + assert_eq!(potential_parents, potential_parents_aligned); + } +} From 09630fc38f9c603d77b7adb9859f6ee1ac139289 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 12 Sep 2023 17:15:36 +0300 Subject: [PATCH 14/16] lookahead collator: only build on top of scheduled relay parents (#1429) Partially addresses #1400 --------- Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- .../consensus/aura/src/collators/lookahead.rs | 55 ++++++++++++++++++- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index b1a7cde55c2c8..57cd646fbcdef 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -45,11 +45,13 @@ use cumulus_primitives_core::{ use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::SubmitCollationParams; -use polkadot_node_subsystem::messages::CollationGenerationMessage; +use polkadot_node_subsystem::messages::{ + CollationGenerationMessage, RuntimeApiMessage, RuntimeApiRequest, +}; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; -use futures::prelude::*; +use futures::{channel::oneshot, prelude::*}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; use sc_consensus_aura::standalone as aura_internal; @@ -181,6 +183,17 @@ where while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); + if !is_para_scheduled(relay_parent, params.para_id, &mut params.overseer_handle).await { + tracing::trace!( + target: crate::LOG_TARGET, + ?relay_parent, + ?params.para_id, + "Para is not scheduled on any core, skipping import notification", + ); + + continue + } + let max_pov_size = match params .relay_client .persisted_validation_data( @@ -444,3 +457,41 @@ async fn max_ancestry_lookback( }, } } + +// Checks if there exists a scheduled core for the para at the provided relay parent. +// +// Falls back to `false` in case of an error. 
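+//
+// The lookup goes through the overseer: it requests the `AvailabilityCores` runtime API
+// at `relay_parent` and scans the returned cores for one assigned to `para_id`.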
+async fn is_para_scheduled( + relay_parent: PHash, + para_id: ParaId, + overseer_handle: &mut OverseerHandle, +) -> bool { + let (tx, rx) = oneshot::channel(); + let request = RuntimeApiRequest::AvailabilityCores(tx); + overseer_handle + .send_msg(RuntimeApiMessage::Request(relay_parent, request), "LookaheadCollator") + .await; + + let cores = match rx.await { + Ok(Ok(cores)) => cores, + Ok(Err(error)) => { + tracing::error!( + target: crate::LOG_TARGET, + ?error, + ?relay_parent, + "Failed to query availability cores runtime API", + ); + return false + }, + Err(oneshot::Canceled) => { + tracing::error!( + target: crate::LOG_TARGET, + ?relay_parent, + "Sender for availability cores runtime request dropped", + ); + return false + }, + }; + + cores.iter().any(|core| core.para_id() == Some(para_id)) +} From ee6eeb74668502798d40f0c83e3710300fa9368e Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 12 Sep 2023 17:47:43 +0300 Subject: [PATCH 15/16] Allow justifications on non-finalized blocks (#1211) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit One assertion was unnecessary because of the check right above it, second assertion resolves https://github.com/paritytech/polkadot-sdk/issues/1159 --------- Co-authored-by: Bastian Köcher --- substrate/client/consensus/grandpa/src/import.rs | 1 - substrate/client/service/src/client/client.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs index 8481b39584729..ca5b7c400bfb2 100644 --- a/substrate/client/consensus/grandpa/src/import.rs +++ b/substrate/client/consensus/grandpa/src/import.rs @@ -552,7 +552,6 @@ where .into(), )) } - assert!(block.justifications.is_some()); let mut authority_set = self.authority_set.inner_locked(); authority_set.authority_set_changes.insert(number); crate::aux_schema::update_authority_set::( diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index a0983d823e5b1..09c1673884aac 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -603,8 +603,6 @@ where .block_gap .map_or(false, |(start, _)| *import_headers.post().number() == start); - assert!(justifications.is_some() && finalized || justifications.is_none() || gap_block); - // the block is lower than our last finalized block so it must revert // finality, refusing import. if status == blockchain::BlockStatus::Unknown && From f204e3264f945c33b4cea18a49f7232c180b07c5 Mon Sep 17 00:00:00 2001 From: Oleg Plakida <112385193+oleg-plakida@users.noreply.github.com> Date: Tue, 12 Sep 2023 19:44:47 +0100 Subject: [PATCH 16/16] Oleg/ci cd/add new tag (#1427) Update CI tag for docker vm runners --------- Co-authored-by: alvicsam --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 71518428b945b..10dd69f12a77c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -114,7 +114,7 @@ default: - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] tags: - - linux-docker-vm-c2 + - linux-docker # rusty-cachier's hidden job. Parts of this job are used to instrument the pipeline's other real jobs with rusty-cachier # rusty-cachier's commands are described here: https://gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client#description