diff --git a/bin/runtime-common/src/refund_relayer_extension.rs b/bin/runtime-common/src/refund_relayer_extension.rs index 9efeedf24dd..df4eae6f737 100644 --- a/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bin/runtime-common/src/refund_relayer_extension.rs @@ -28,9 +28,12 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::{CallableCallFor, DispatchInfo, Dispatchable, PostDispatchInfo}, traits::IsSubType, + weights::Weight, CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; -use pallet_bridge_grandpa::{CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper}; +use pallet_bridge_grandpa::{ + CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, SubmitFinalityProofInfo, +}; use pallet_bridge_messages::Config as MessagesConfig; use pallet_bridge_parachains::{ BoundedBridgeGrandpaConfig, CallSubType as ParachainsCallSubType, Config as ParachainsConfig, @@ -140,7 +143,11 @@ pub struct PreDispatchData { #[derive(RuntimeDebugNoBound, PartialEq)] pub enum CallInfo { /// Relay chain finality + parachain finality + message delivery calls. - AllFinalityAndDelivery(RelayBlockNumber, SubmitParachainHeadsInfo, ReceiveMessagesProofInfo), + AllFinalityAndDelivery( + SubmitFinalityProofInfo, + SubmitParachainHeadsInfo, + ReceiveMessagesProofInfo, + ), /// Parachain finality + message delivery calls. ParachainFinalityAndDelivery(SubmitParachainHeadsInfo, ReceiveMessagesProofInfo), /// Standalone message delivery call. @@ -149,7 +156,7 @@ pub enum CallInfo { impl CallInfo { /// Returns the pre-dispatch `finality_target` sent to the `SubmitFinalityProof` call. 
- fn submit_finality_proof_info(&self) -> Option { + fn submit_finality_proof_info(&self) -> Option> { match *self { Self::AllFinalityAndDelivery(info, _, _) => Some(info), _ => None, @@ -318,6 +325,9 @@ where len: usize, result: &DispatchResult, ) -> Result<(), TransactionValidityError> { + let mut extra_weight = Weight::zero(); + let mut extra_size = 0; + // We don't refund anything if the transaction has failed. if result.is_err() { return Ok(()) @@ -330,8 +340,10 @@ where }; // check if relay chain state has been updated - if let Some(relay_block_number) = call_info.submit_finality_proof_info() { - if !SubmitFinalityProofHelper::::was_successful(relay_block_number) { + if let Some(finality_proof_info) = call_info.submit_finality_proof_info() { + if !SubmitFinalityProofHelper::::was_successful( + finality_proof_info.block_number, + ) { // we only refund relayer if all calls have updated chain state return Ok(()) } @@ -342,6 +354,11 @@ where // `utility.batchAll` transaction always requires payment. But in both cases we'll // refund relayer - either explicitly here, or using `Pays::No` if he's choosing // to submit dedicated transaction. + + // submitter has means to include extra weight/bytes in the `submit_finality_proof` + // call, so let's subtract extra weight/size to avoid refunding for this extra stuff + extra_weight = finality_proof_info.extra_weight; + extra_size = finality_proof_info.extra_size; } // check if parachain state has been updated @@ -370,8 +387,15 @@ where // cost of this attack is nothing. Hence we use zero as tip here. 
let tip = Zero::zero(); + // decrease post-dispatch weight/size using extra weight/size that we know now + let post_info_len = len.saturating_sub(extra_size as usize); + let mut post_info = *post_info; + post_info.actual_weight = + Some(post_info.actual_weight.unwrap_or(info.weight).saturating_sub(extra_weight)); + // compute the relayer refund - let refund = Refund::compute_refund(info, post_info, len, tip); + let refund = Refund::compute_refund(info, &post_info, post_info_len, tip); + // finally - register refund in relayers pallet RelayersPallet::::register_relayer_reward(Msgs::Id::get(), &relayer, refund); @@ -397,9 +421,9 @@ mod tests { use bp_parachains::{BestParaHeadHash, ParaInfo}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use bp_runtime::HeaderId; - use bp_test_utils::make_default_justification; + use bp_test_utils::{make_default_justification, test_keyring}; use frame_support::{assert_storage_noop, parameter_types, weights::Weight}; - use pallet_bridge_grandpa::Call as GrandpaCall; + use pallet_bridge_grandpa::{Call as GrandpaCall, StoredAuthoritySet}; use pallet_bridge_messages::Call as MessagesCall; use pallet_bridge_parachains::{Call as ParachainsCall, RelayBlockHash}; use sp_runtime::{ @@ -434,7 +458,11 @@ mod tests { parachain_head_hash: ParaHash, best_delivered_message: MessageNonce, ) { + let authorities = test_keyring().into_iter().map(|(a, w)| (a.into(), w)).collect(); let best_relay_header = HeaderId(best_relay_header_number, RelayBlockHash::default()); + pallet_bridge_grandpa::CurrentAuthoritySet::::put( + StoredAuthoritySet::try_new(authorities, 0).unwrap(), + ); pallet_bridge_grandpa::BestFinalized::::put(best_relay_header); let para_id = ParaId(TestParachain::get()); @@ -524,7 +552,11 @@ mod tests { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: CallInfo::AllFinalityAndDelivery( - 200, + SubmitFinalityProofInfo { + block_number: 200, + extra_weight: Weight::zero(), + extra_size: 0, + }, 
SubmitParachainHeadsInfo { at_relay_block_number: 200, para_id: ParaId(TestParachain::get()), @@ -823,6 +855,54 @@ mod tests { }); } + #[test] + fn post_dispatch_refunds_relayer_in_all_finality_batch_with_extra_weight() { + run_test(|| { + initialize_environment(200, 200, [1u8; 32].into(), 200); + + let mut dispatch_info = dispatch_info(); + dispatch_info.weight = Weight::from_ref_time( + frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, + ); + + // without any size/weight refund: we expect regular reward + let pre_dispatch_data = all_finality_pre_dispatch_data(); + let regular_reward = expected_reward(); + run_post_dispatch(Some(pre_dispatch_data), Ok(())); + assert_eq!( + RelayersPallet::::relayer_reward( + relayer_account_at_this_chain(), + TestLaneId::get() + ), + Some(regular_reward), + ); + + // now repeat the same with size+weight refund: we expect smaller reward + let mut pre_dispatch_data = all_finality_pre_dispatch_data(); + match pre_dispatch_data.call_info { + CallInfo::AllFinalityAndDelivery(ref mut info, ..) => { + info.extra_weight.set_ref_time( + frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, + ); + info.extra_size = 32; + }, + _ => unreachable!(), + } + run_post_dispatch(Some(pre_dispatch_data), Ok(())); + let reward_after_two_calls = RelayersPallet::::relayer_reward( + relayer_account_at_this_chain(), + TestLaneId::get(), + ) + .unwrap(); + assert!( + reward_after_two_calls < 2 * regular_reward, + "{} must be < 2 * {}", + reward_after_two_calls, + 2 * regular_reward, + ); + }); + } + #[test] fn post_dispatch_refunds_relayer_in_all_finality_batch() { run_test(|| { diff --git a/modules/grandpa/src/call_ext.rs b/modules/grandpa/src/call_ext.rs index 42c276f5f6c..b57aebb1ac1 100644 --- a/modules/grandpa/src/call_ext.rs +++ b/modules/grandpa/src/call_ext.rs @@ -14,17 +14,46 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::{Config, Error, Pallet}; +use crate::{weights::WeightInfo, BridgedBlockNumber, BridgedHeader, Config, Error, Pallet}; +use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; use bp_runtime::BlockNumberOf; -use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; +use codec::Encode; +use frame_support::{dispatch::CallableCallFor, traits::IsSubType, weights::Weight, RuntimeDebug}; use sp_runtime::{ - traits::Header, + traits::{Header, Zero}, transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + SaturatedConversion, }; +/// Info about a `SubmitFinalityProof` call, which submits a finality proof for a single header. +#[derive(Copy, Clone, PartialEq, RuntimeDebug)] +pub struct SubmitFinalityProofInfo { + /// Number of the finality target. + pub block_number: N, + /// Extra weight that we assume is included in the call. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the call must not have the + /// weight above some limit. The fee paid for weight above that limit, is never refunded. + pub extra_weight: Weight, + /// Extra size (in bytes) that we assume is included in the call. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the call must not have the + /// weight above some limit. The fee paid for bytes above that limit, is never refunded. + pub extra_size: u32, +} + +impl SubmitFinalityProofInfo { + /// Returns `true` if call size/weight is below our estimations for regular calls. + pub fn fits_limits(&self) -> bool { + self.extra_weight.is_zero() && self.extra_size.is_zero() + } +} + /// Helper struct that provides methods for working with the `SubmitFinalityProof` call. 
pub struct SubmitFinalityProofHelper, I: 'static> { - pub _phantom_data: sp_std::marker::PhantomData<(T, I)>, + _phantom_data: sp_std::marker::PhantomData<(T, I)>, } impl, I: 'static> SubmitFinalityProofHelper { @@ -69,12 +98,17 @@ impl, I: 'static> SubmitFinalityProofHelper { pub trait CallSubType, I: 'static>: IsSubType, T>> { - /// Extract the finality target from a `SubmitParachainHeads` call. - fn submit_finality_proof_info(&self) -> Option> { - if let Some(crate::Call::::submit_finality_proof { finality_target, .. }) = + /// Extract finality proof info from a runtime call. + fn submit_finality_proof_info( + &self, + ) -> Option>> { + if let Some(crate::Call::::submit_finality_proof { finality_target, justification }) = self.is_sub_type() { - return Some(*finality_target.number()) + return Some(submit_finality_proof_info_from_args::( + finality_target, + justification, + )) } None @@ -92,7 +126,7 @@ pub trait CallSubType, I: 'static>: _ => return Ok(ValidTransaction::default()), }; - match SubmitFinalityProofHelper::::check_obsolete(finality_target) { + match SubmitFinalityProofHelper::::check_obsolete(finality_target.block_number) { Ok(_) => Ok(ValidTransaction::default()), Err(Error::::OldHeader) => InvalidTransaction::Stale.into(), Err(_) => InvalidTransaction::Call.into(), @@ -105,15 +139,66 @@ impl, I: 'static> CallSubType for T::RuntimeCall where { } +/// Extract finality proof info from the submitted header and justification. +pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( + finality_target: &BridgedHeader, + justification: &GrandpaJustification>, +) -> SubmitFinalityProofInfo> { + let block_number = *finality_target.number(); + + // the `submit_finality_proof` call will reject justifications with invalid, duplicate, + // unknown and extra signatures. It'll also reject justifications with less than necessary + // signatures. So we do not care about extra weight because of additional signatures here. 
+ let precommits_len = justification.commit.precommits.len().saturated_into(); + let required_precommits = precommits_len; + + // We do care about extra weight because of more-than-expected headers in the votes + // ancestries. But we have problems computing extra weight for additional headers (weight of + // additional header is too small, so that our benchmarks aren't detecting that). So if there + // are more than expected headers in votes ancestries, we will treat the whole call weight + // as an extra weight. + let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); + let extra_weight = + if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY { + T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) + } else { + Weight::zero() + }; + + // we can estimate extra call size easily, without any additional significant overhead + let actual_call_size: u32 = finality_target + .encoded_size() + .saturating_add(justification.encoded_size()) + .saturated_into(); + let max_expected_call_size = max_expected_call_size::(required_precommits); + let extra_size = actual_call_size.saturating_sub(max_expected_call_size); + + SubmitFinalityProofInfo { block_number, extra_weight, extra_size } +} + +/// Returns maximal expected size of `submit_finality_proof` call arguments. 
+fn max_expected_call_size, I: 'static>(required_precommits: u32) -> u32 { + let max_expected_justification_size = + GrandpaJustification::max_reasonable_size::(required_precommits); + + // call arguments are header and justification + T::BridgedChain::MAX_HEADER_SIZE.saturating_add(max_expected_justification_size) +} + #[cfg(test)] mod tests { use crate::{ call_ext::CallSubType, - mock::{run_test, test_header, RuntimeCall, TestNumber, TestRuntime}, - BestFinalized, + mock::{run_test, test_header, RuntimeCall, TestBridgedChain, TestNumber, TestRuntime}, + BestFinalized, Config, WeightInfo, }; + use bp_header_chain::ChainWithGrandpa; use bp_runtime::HeaderId; - use bp_test_utils::make_default_justification; + use bp_test_utils::{ + make_default_justification, make_justification_for_header, JustificationGeneratorParams, + }; + use frame_support::weights::Weight; + use sp_runtime::{testing::DigestItem, traits::Header as _, SaturatedConversion}; fn validate_block_submit(num: TestNumber) -> bool { let bridge_grandpa_call = crate::Call::::submit_finality_proof { @@ -160,4 +245,67 @@ mod tests { assert!(validate_block_submit(15)); }); } + + #[test] + fn extension_returns_correct_extra_size_if_call_arguments_are_too_large() { + // when call arguments are below our limit => no refund + let small_finality_target = test_header(1); + let justification_params = JustificationGeneratorParams { + header: small_finality_target.clone(), + ..Default::default() + }; + let small_justification = make_justification_for_header(justification_params); + let small_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(small_finality_target), + justification: small_justification, + }); + assert_eq!(small_call.submit_finality_proof_info().unwrap().extra_size, 0); + + // when call arguments are too large => partial refund + let mut large_finality_target = test_header(1); + large_finality_target + .digest_mut() + .push(DigestItem::Other(vec![42u8; 1024 * 
1024])); + let justification_params = JustificationGeneratorParams { + header: large_finality_target.clone(), + ..Default::default() + }; + let large_justification = make_justification_for_header(justification_params); + let large_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(large_finality_target), + justification: large_justification, + }); + assert_ne!(large_call.submit_finality_proof_info().unwrap().extra_size, 0); + } + + #[test] + fn extension_returns_correct_extra_weight_if_there_are_too_many_headers_in_votes_ancestry() { + let finality_target = test_header(1); + let mut justification_params = JustificationGeneratorParams { + header: finality_target.clone(), + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + ..Default::default() + }; + + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` headers => no refund + let justification = make_justification_for_header(justification_params.clone()); + let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(finality_target.clone()), + justification, + }); + assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero()); + + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1` headers => full refund + justification_params.ancestors += 1; + let justification = make_justification_for_header(justification_params); + let call_weight = ::WeightInfo::submit_finality_proof( + justification.commit.precommits.len().saturated_into(), + justification.votes_ancestries.len().saturated_into(), + ); + let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof { + finality_target: Box::new(finality_target), + justification, + }); + assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, call_weight); + } } diff --git a/modules/grandpa/src/lib.rs b/modules/grandpa/src/lib.rs index b11da53f7b3..e94d91a5c16 100644 --- a/modules/grandpa/src/lib.rs +++ 
b/modules/grandpa/src/lib.rs @@ -36,7 +36,7 @@ // Runtime-generated enums #![allow(clippy::large_enum_variant)] -use storage_types::StoredAuthoritySet; +pub use storage_types::StoredAuthoritySet; use bp_header_chain::{ justification::GrandpaJustification, ChainWithGrandpa, HeaderChain, InitializationData, @@ -180,6 +180,9 @@ pub mod pallet { let is_authorities_change_enacted = try_enact_authority_change::(&finality_target, set_id)?; + let may_refund_call_fee = is_authorities_change_enacted && + submit_finality_proof_info_from_args::(&*finality_target, &justification) + .fits_limits(); >::mutate(|count| *count += 1); insert_header::(*finality_target, hash); log::info!( @@ -193,8 +196,10 @@ pub mod pallet { // // We don't want to charge extra costs for mandatory operations. So relayer is not // paying fee for mandatory headers import transactions. - let is_mandatory_header = is_authorities_change_enacted; - let pays_fee = if is_mandatory_header { Pays::No } else { Pays::Yes }; + // + // If size/weight of the call is exceeds our estimated limits, the relayer still needs + // to pay for the transaction. + let pays_fee = if may_refund_call_fee { Pays::No } else { Pays::Yes }; // the proof size component of the call weight assumes that there are // `MaxBridgedAuthorities` in the `CurrentAuthoritySet` (we use `MaxEncodedLen` @@ -313,7 +318,7 @@ pub mod pallet { /// The current GRANDPA Authority set. #[pallet::storage] - pub(super) type CurrentAuthoritySet, I: 'static = ()> = + pub type CurrentAuthoritySet, I: 'static = ()> = StorageValue<_, StoredAuthoritySet, ValueQuery>; /// Optional pallet owner. @@ -504,7 +509,7 @@ pub mod pallet { init_params; let authority_set_length = authority_list.len(); let authority_set = StoredAuthoritySet::::try_new(authority_list, set_id) - .map_err(|_| { + .map_err(|e| { log::error!( target: LOG_TARGET, "Failed to initialize bridge. 
Number of authorities in the set {} is larger than the configured value {}", @@ -512,7 +517,7 @@ pub mod pallet { T::BridgedChain::MAX_AUTHORITIES_COUNT, ); - Error::TooManyAuthoritiesInSet + e })?; let initial_hash = header.hash(); @@ -630,8 +635,8 @@ pub fn initialize_for_benchmarks, I: 'static>(header: BridgedHeader mod tests { use super::*; use crate::mock::{ - run_test, test_header, RuntimeOrigin, TestHeader, TestNumber, TestRuntime, - MAX_BRIDGED_AUTHORITIES, + run_test, test_header, RuntimeOrigin, TestBridgedChain, TestHeader, TestNumber, + TestRuntime, MAX_BRIDGED_AUTHORITIES, }; use bp_header_chain::BridgeGrandpaCall; use bp_runtime::BasicOperatingMode; @@ -965,6 +970,64 @@ mod tests { }) } + #[test] + fn relayer_pays_tx_fee_when_submitting_huge_mandatory_header() { + run_test(|| { + initialize_substrate_bridge(); + + // let's prepare a huge authorities change header, which is definitely above size limits + let mut header = test_header(2); + header.digest = change_log(0); + header.digest.push(DigestItem::Other(vec![42u8; 1024 * 1024])); + let justification = make_default_justification(&header); + + // without large digest item ^^^ the relayer would have paid zero transaction fee + // (`Pays::No`) + let result = Pallet::::submit_finality_proof( + RuntimeOrigin::signed(1), + Box::new(header.clone()), + justification, + ); + assert_ok!(result); + assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); + + // Make sure that our header is the best finalized + assert_eq!(>::get().unwrap().1, header.hash()); + assert!(>::contains_key(header.hash())); + }) + } + + #[test] + fn relayer_pays_tx_fee_when_submitting_justification_with_long_ancestry_votes() { + run_test(|| { + initialize_substrate_bridge(); + + // let's prepare a huge authorities change header, which is definitely above weight + // limits + let mut header = test_header(2); + header.digest = change_log(0); + let justification = make_justification_for_header(JustificationGeneratorParams 
{ + header: header.clone(), + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1, + ..Default::default() + }); + + // without many headers in votes ancestries ^^^ the relayer would have paid zero + // transaction fee (`Pays::No`) + let result = Pallet::::submit_finality_proof( + RuntimeOrigin::signed(1), + Box::new(header.clone()), + justification, + ); + assert_ok!(result); + assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); + + // Make sure that our header is the best finalized + assert_eq!(>::get().unwrap().1, header.hash()); + assert!(>::contains_key(header.hash())); + }) + } + #[test] fn importing_header_rejects_header_with_scheduled_change_delay() { run_test(|| { diff --git a/modules/grandpa/src/storage_types.rs b/modules/grandpa/src/storage_types.rs index 918c131289c..d674f1a7b0a 100644 --- a/modules/grandpa/src/storage_types.rs +++ b/modules/grandpa/src/storage_types.rs @@ -16,7 +16,7 @@ //! Wrappers for public types that are implementing `MaxEncodedLen` -use crate::Config; +use crate::{Config, Error}; use bp_header_chain::{AuthoritySet, ChainWithGrandpa}; use codec::{Decode, Encode, MaxEncodedLen}; @@ -52,8 +52,12 @@ impl, I: 'static> StoredAuthoritySet { /// Try to create a new bounded GRANDPA Authority Set from unbounded list. /// /// Returns error if number of authorities in the provided list is too large. 
- pub fn try_new(authorities: AuthorityList, set_id: SetId) -> Result { - Ok(Self { authorities: TryFrom::try_from(authorities).map_err(drop)?, set_id }) + pub fn try_new(authorities: AuthorityList, set_id: SetId) -> Result> { + Ok(Self { + authorities: TryFrom::try_from(authorities) + .map_err(|_| Error::TooManyAuthoritiesInSet)?, + set_id, + }) } /// Returns number of bytes that may be subtracted from the PoV component of diff --git a/modules/messages/src/lib.rs b/modules/messages/src/lib.rs index cc9033d1058..68af3024445 100644 --- a/modules/messages/src/lib.rs +++ b/modules/messages/src/lib.rs @@ -43,6 +43,7 @@ pub use weights::WeightInfo; pub use weights_ext::{ ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH, + EXTRA_STORAGE_PROOF_SIZE, }; use crate::{ diff --git a/modules/parachains/src/call_ext.rs b/modules/parachains/src/call_ext.rs index 5aed9e80ba0..ee3770146c2 100644 --- a/modules/parachains/src/call_ext.rs +++ b/modules/parachains/src/call_ext.rs @@ -23,14 +23,17 @@ use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidity, /// Info about a `SubmitParachainHeads` call which tries to update a single parachain. #[derive(PartialEq, RuntimeDebug)] pub struct SubmitParachainHeadsInfo { + /// Number of the finalized relay block that has been used to prove parachain finality. pub at_relay_block_number: RelayBlockNumber, + /// Parachain identifier. pub para_id: ParaId, + /// Hash of the bundled parachain head. pub para_head_hash: ParaHash, } /// Helper struct that provides methods for working with the `SubmitParachainHeads` call. 
pub struct SubmitParachainHeadsHelper, I: 'static> { - pub _phantom_data: sp_std::marker::PhantomData<(T, I)>, + _phantom_data: sp_std::marker::PhantomData<(T, I)>, } impl, I: 'static> SubmitParachainHeadsHelper { diff --git a/modules/parachains/src/mock.rs b/modules/parachains/src/mock.rs index 0e8261f6899..aabcdcdd364 100644 --- a/modules/parachains/src/mock.rs +++ b/modules/parachains/src/mock.rs @@ -187,7 +187,7 @@ impl frame_system::Config for TestRuntime { type BlockLength = (); type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; } parameter_types! { @@ -223,7 +223,7 @@ impl pallet_bridge_parachains::Config for TestRuntime { type ParasPalletName = ParasPalletName; type ParaStoredHeaderDataBuilder = (Parachain1, Parachain2, Parachain3, BigParachain); type HeadsToKeep = HeadsToKeep; - type MaxParaHeadDataSize = frame_support::traits::ConstU32; + type MaxParaHeadDataSize = ConstU32; } #[derive(Debug)] diff --git a/primitives/header-chain/src/justification.rs b/primitives/header-chain/src/justification.rs index 43adc801254..1e34f365621 100644 --- a/primitives/header-chain/src/justification.rs +++ b/primitives/header-chain/src/justification.rs @@ -19,12 +19,15 @@ //! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin //! will ever be moved to the sp_finality_grandpa, we should reuse that implementation. 
-use codec::{Decode, Encode}; +use crate::ChainWithGrandpa; + +use bp_runtime::{BlockNumberOf, Chain, HashOf}; +use codec::{Decode, Encode, MaxEncodedLen}; use finality_grandpa::voter_set::VoterSet; use frame_support::RuntimeDebug; use scale_info::TypeInfo; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId}; -use sp_runtime::traits::Header as HeaderT; +use sp_runtime::{traits::Header as HeaderT, SaturatedConversion}; use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, prelude::*, @@ -46,6 +49,43 @@ pub struct GrandpaJustification { pub votes_ancestries: Vec
, } +impl GrandpaJustification { + /// Returns reasonable size of justification using constants from the provided chain. + /// + /// An imprecise analogue of `MaxEncodedLen` implementation. We don't use it for + /// any precise calculations - that's just an estimation. + pub fn max_reasonable_size(required_precommits: u32) -> u32 + where + C: Chain
+ ChainWithGrandpa, + { + // we don't need precise results here - just estimations, so some details + // are removed from computations (e.g. bytes required to encode vector length) + + // structures in `finality_grandpa` crate are not implementing `MaxEncodedLength`, so + // here's our estimation for the `finality_grandpa::Commit` struct size + // + // precommit is: hash + number + // signed precommit is: precommit + signature (64b) + authority id + // commit is: hash + number + vec of signed precommits + let signed_precommit_size: u32 = BlockNumberOf::::max_encoded_len() + .saturating_add(HashOf::::max_encoded_len().saturated_into()) + .saturating_add(64) + .saturating_add(AuthorityId::max_encoded_len().saturated_into()) + .saturated_into(); + let max_expected_signed_commit_size = signed_precommit_size + .saturating_mul(required_precommits) + .saturating_add(BlockNumberOf::::max_encoded_len().saturated_into()) + .saturating_add(HashOf::::max_encoded_len().saturated_into()); + + // justification is a signed GRANDPA commit, `votes_ancestries` vector and round number + let max_expected_votes_ancestries_size = C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + .saturating_mul(C::AVERAGE_HEADER_SIZE_IN_JUSTIFICATION); + + 8u32.saturating_add(max_expected_signed_commit_size) + .saturating_add(max_expected_votes_ancestries_size) + } +} + impl crate::FinalityProof for GrandpaJustification { fn target_header_number(&self) -> H::Number { self.commit.target_number @@ -59,6 +99,12 @@ pub enum Error { JustificationDecode, /// Justification is finalizing unexpected header. InvalidJustificationTarget, + /// Justification contains redundant votes. + RedundantVotesInJustification, + /// Justification contains unknown authority precommit. + UnknownAuthorityVote, + /// Justification contains duplicate authority precommit. + DuplicateAuthorityVote, /// The authority has provided an invalid signature. 
InvalidAuthoritySignature, /// The justification contains precommit for header that is not a descendant of the commit @@ -124,7 +170,7 @@ where authorities_set_id, authorities_set, justification, - &mut (), + &mut StrictVerificationCallbacks, ) } @@ -138,6 +184,23 @@ trait VerificationCallbacks { fn on_redundant_authority_vote(&mut self, precommit_idx: usize) -> Result<(), Error>; } +/// Verification callbacks that reject all unknown, duplicate or redundant votes. +struct StrictVerificationCallbacks; + +impl VerificationCallbacks for StrictVerificationCallbacks { + fn on_unkown_authority(&mut self, _precommit_idx: usize) -> Result<(), Error> { + Err(Error::UnknownAuthorityVote) + } + + fn on_duplicate_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { + Err(Error::DuplicateAuthorityVote) + } + + fn on_redundant_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { + Err(Error::RedundantVotesInJustification) + } +} + /// Verification callbacks for justification optimization. struct OptimizationCallbacks(Vec); @@ -170,21 +233,6 @@ impl VerificationCallbacks for OptimizationCallbacks { } } -// this implementation will be removed in https://github.com/paritytech/parity-bridges-common/pull/1882 -impl VerificationCallbacks for () { - fn on_unkown_authority(&mut self, _precommit_idx: usize) -> Result<(), Error> { - Ok(()) - } - - fn on_duplicate_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { - Ok(()) - } - - fn on_redundant_authority_vote(&mut self, _precommit_idx: usize) -> Result<(), Error> { - Ok(()) - } -} - /// Verify that justification, that is generated by given authority set, finalizes given header. 
fn verify_justification_with_callbacks( finalized_target: (Header::Hash, Header::Number), @@ -206,11 +254,13 @@ where let mut signature_buffer = Vec::new(); let mut votes = BTreeSet::new(); let mut cumulative_weight = 0u64; + for (precommit_idx, signed) in justification.commit.precommits.iter().enumerate() { // if we have collected enough precommits, we probabably want to fail/remove extra // precommits - if cumulative_weight > threshold { + if cumulative_weight >= threshold { callbacks.on_redundant_authority_vote(precommit_idx)?; + continue } // authority must be in the set diff --git a/primitives/header-chain/tests/implementation_match.rs b/primitives/header-chain/tests/implementation_match.rs index aaa19d4b918..5d0fa35c376 100644 --- a/primitives/header-chain/tests/implementation_match.rs +++ b/primitives/header-chain/tests/implementation_match.rs @@ -15,7 +15,8 @@ // along with Parity Bridges Common. If not, see . //! Tests inside this module are made to ensure that our custom justification verification -//! implementation works exactly as `fn finality_grandpa::validate_commit`. +//! implementation works similar to the [`finality_grandpa::validate_commit`] and explicitly +//! show where we behave different. //! //! Some of tests in this module may partially duplicate tests from `justification.rs`, //! but their purpose is different. 
@@ -23,7 +24,7 @@ use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification}; use bp_test_utils::{ header_id, make_justification_for_header, signed_precommit, test_header, Account, - JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID, + JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, FERDIE, TEST_GRANDPA_SET_ID, }; use finality_grandpa::voter_set::VoterSet; use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; @@ -172,7 +173,42 @@ fn same_result_when_precommit_target_is_not_descendant_of_commit_target() { } #[test] -fn same_result_when_justification_contains_duplicate_vote() { +fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() { + // just remove one authority from the minimal set and we shall not reach the threshold + let mut authorities_set = minimal_accounts_set(); + authorities_set.pop(); + let justification = make_justification_for_header(JustificationGeneratorParams { + header: test_header(1), + authorities: authorities_set, + ..Default::default() + }); + + // our implementation returns an error + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Err(Error::TooLowCumulativeWeight), + ); + // original implementation returns `Ok(validation_result)` + // with `validation_result.is_valid() == false`. 
+ let result = finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .unwrap(); + + assert!(!result.is_valid()); +} + +// tests below are our differences with the original implementation + +#[test] +fn different_result_when_justification_contains_duplicate_vote() { let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), authorities: minimal_accounts_set(), @@ -181,10 +217,11 @@ fn same_result_when_justification_contains_duplicate_vote() { }); // the justification may contain exactly the same vote (i.e. same precommit and same signature) // multiple times && it isn't treated as an error by original implementation + let last_precommit = justification.commit.precommits.pop().unwrap(); justification.commit.precommits.push(justification.commit.precommits[0].clone()); - justification.commit.precommits.push(justification.commit.precommits[0].clone()); + justification.commit.precommits.push(last_precommit); - // our implementation succeeds + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -192,7 +229,7 @@ fn same_result_when_justification_contains_duplicate_vote() { &full_voter_set(), &justification, ), - Ok(()), + Err(Error::DuplicateAuthorityVote), ); // original implementation returns `Ok(validation_result)` // with `validation_result.is_valid() == true`. 
@@ -207,7 +244,7 @@ fn same_result_when_justification_contains_duplicate_vote() { } #[test] -fn same_result_when_authority_equivocates_once_in_a_round() { +fn different_results_when_authority_equivocates_once_in_a_round() { let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), authorities: minimal_accounts_set(), @@ -216,14 +253,16 @@ fn same_result_when_authority_equivocates_once_in_a_round() { }); // the justification original implementation allows authority to submit two different // votes in a single round, of which only first is 'accepted' + let last_precommit = justification.commit.precommits.pop().unwrap(); justification.commit.precommits.push(signed_precommit::( &ALICE, header_id::(1), justification.round, TEST_GRANDPA_SET_ID, )); + justification.commit.precommits.push(last_precommit); - // our implementation succeeds + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -231,7 +270,7 @@ fn same_result_when_authority_equivocates_once_in_a_round() { &full_voter_set(), &justification, ), - Ok(()), + Err(Error::DuplicateAuthorityVote), ); // original implementation returns `Ok(validation_result)` // with `validation_result.is_valid() == true`. 
@@ -246,7 +285,7 @@ fn same_result_when_authority_equivocates_once_in_a_round() { } #[test] -fn same_result_when_authority_equivocates_twice_in_a_round() { +fn different_results_when_authority_equivocates_twice_in_a_round() { let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), authorities: minimal_accounts_set(), @@ -259,6 +298,8 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { // but there's also a code that prevents this from happening: // https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/round.rs#L287 // => so now we are also just ignoring all votes from the same authority, except the first one + let last_precommit = justification.commit.precommits.pop().unwrap(); + let prev_last_precommit = justification.commit.precommits.pop().unwrap(); justification.commit.precommits.push(signed_precommit::( &ALICE, header_id::(1), @@ -271,8 +312,10 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { justification.round, TEST_GRANDPA_SET_ID, )); + justification.commit.precommits.push(last_precommit); + justification.commit.precommits.push(prev_last_precommit); - // our implementation succeeds + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -280,7 +323,7 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { &full_voter_set(), &justification, ), - Ok(()), + Err(Error::DuplicateAuthorityVote), ); // original implementation returns `Ok(validation_result)` // with `validation_result.is_valid() == true`. 
@@ -295,17 +338,23 @@ fn same_result_when_authority_equivocates_twice_in_a_round() { } #[test] -fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() { - // just remove one authority from the minimal set and we shall not reach the threshold - let mut authorities_set = minimal_accounts_set(); - authorities_set.pop(); - let justification = make_justification_for_header(JustificationGeneratorParams { +fn different_results_when_there_are_more_than_enough_votes() { + let mut justification = make_justification_for_header(JustificationGeneratorParams { header: test_header(1), - authorities: authorities_set, + authorities: minimal_accounts_set(), + ancestors: 0, ..Default::default() }); + // the reference implementation just keep verifying signatures even if we have + // collected enough votes. We are not + justification.commit.precommits.push(signed_precommit::( + &EVE, + header_id::(1), + justification.round, + TEST_GRANDPA_SET_ID, + )); - // our implementation returns an error + // our implementation fails assert_eq!( verify_justification::( header_id::(1), @@ -313,10 +362,10 @@ fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_ta &full_voter_set(), &justification, ), - Err(Error::TooLowCumulativeWeight), + Err(Error::RedundantVotesInJustification), ); // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. + // with `validation_result.is_valid() == true`. 
let result = finality_grandpa::validate_commit( &justification.commit, &full_voter_set(), @@ -324,5 +373,46 @@ fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_ta ) .unwrap(); - assert!(!result.is_valid()); + assert!(result.is_valid()); +} + +#[test] +fn different_results_when_there_is_a_vote_of_unknown_authority() { + let mut justification = make_justification_for_header(JustificationGeneratorParams { + header: test_header(1), + authorities: minimal_accounts_set(), + ancestors: 0, + ..Default::default() + }); + // the reference implementation just keep verifying signatures even if we have + // collected enough votes. We are not + let last_precommit = justification.commit.precommits.pop().unwrap(); + justification.commit.precommits.push(signed_precommit::( + &FERDIE, + header_id::(1), + justification.round, + TEST_GRANDPA_SET_ID, + )); + justification.commit.precommits.push(last_precommit); + + // our implementation fails + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &full_voter_set(), + &justification, + ), + Err(Error::UnknownAuthorityVote), + ); + // original implementation returns `Ok(validation_result)` + // with `validation_result.is_valid() == true`. + let result = finality_grandpa::validate_commit( + &justification.commit, + &full_voter_set(), + &AncestryChain::new(&justification.votes_ancestries), + ) + .unwrap(); + + assert!(result.is_valid()); } diff --git a/primitives/header-chain/tests/justification.rs b/primitives/header-chain/tests/justification.rs index e18163313c9..f01f4ea2f10 100644 --- a/primitives/header-chain/tests/justification.rs +++ b/primitives/header-chain/tests/justification.rs @@ -16,14 +16,16 @@ //! Tests for Grandpa Justification code. 
-use bp_header_chain::justification::{optimize_justification, verify_justification, Error}; +use bp_header_chain::justification::{ + optimize_justification, required_justification_precommits, verify_justification, Error, +}; use bp_test_utils::*; type TestHeader = sp_runtime::testing::Header; #[test] fn valid_justification_accepted() { - let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]; + let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)]; let params = JustificationGeneratorParams { header: test_header(1), round: TEST_GRANDPA_ROUND, @@ -54,7 +56,7 @@ fn valid_justification_accepted_with_single_fork() { header: test_header(1), round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, - authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)], + authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)], ancestors: 5, forks: 1, }; @@ -76,15 +78,16 @@ fn valid_justification_accepted_with_arbitrary_number_of_authorities() { use sp_finality_grandpa::AuthorityId; let n = 15; + let required_signatures = required_justification_precommits(n as _); let authorities = accounts(n).iter().map(|k| (*k, 1)).collect::>(); let params = JustificationGeneratorParams { header: test_header(1), round: TEST_GRANDPA_ROUND, set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone(), + authorities: authorities.clone().into_iter().take(required_signatures as _).collect(), ancestors: n.into(), - forks: n.into(), + forks: required_signatures, }; let authorities = authorities diff --git a/primitives/polkadot-core/src/lib.rs b/primitives/polkadot-core/src/lib.rs index 3b1712042d8..10da6cd9b35 100644 --- a/primitives/polkadot-core/src/lib.rs +++ b/primitives/polkadot-core/src/lib.rs @@ -50,30 +50,37 @@ pub mod parachains; /// our bridge hub parachains huge. So let's stick to the real-world value here. /// /// Right now both Kusama and Polkadot aim to have around 1000 validators. Let's be safe here and -/// take twice as much here. 
-pub const MAX_AUTHORITIES_COUNT: u32 = 2_048; +/// take a bit more here. +pub const MAX_AUTHORITIES_COUNT: u32 = 1_256; /// Reasonable number of headers in the `votes_ancestries` on Polkadot-like chains. /// /// See [`bp_header_chain::ChainWithGrandpa`] for more details. -pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; +/// +/// This value comes from recent (February, 2023) Kusama and Polkadot headers. There are no +/// justifications with any additional headers in votes ancestry, so reasonable headers may +/// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some +/// reserve here. +pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; /// Approximate average header size in `votes_ancestries` field of justification on Polkadot-like /// chains. /// /// See [`bp_header_chain::ChainWithGrandpa`] for more details. -pub const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 256; +/// +/// This value comes from recent (February, 2023) Kusama headers. Average is `336` there, but some +/// non-mandatory headers has size `40kb` (they contain the BABE epoch descriptor with all +/// authorities - just like our mandatory header). Since we assume `2` headers in justification +/// votes ancestry, let's set average header to `40kb / 2`. +pub const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = 20 * 1024; /// Approximate maximal header size on Polkadot-like chains. /// -/// We expect maximal header to have digest item with the new authorities set for every consensus -/// engine (GRANDPA, Babe, BEEFY, ...) - so we multiply it by 3. And also -/// `AVERAGE_HEADER_SIZE_IN_JUSTIFICATION` bytes for other stuff. -/// /// See [`bp_header_chain::ChainWithGrandpa`] for more details. -pub const MAX_HEADER_SIZE: u32 = MAX_AUTHORITIES_COUNT - .saturating_mul(3) - .saturating_add(AVERAGE_HEADER_SIZE_IN_JUSTIFICATION); +/// +/// This value comes from recent (February, 2023) Kusama headers. Maximal header is a mandatory +/// header. 
In its SCALE-encoded form it is `80348` bytes. Let's have some reserve here. +pub const MAX_HEADER_SIZE: u32 = 90_000; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at /// Polkadot-like chain. This mostly depends on number of entries in the storage trie. diff --git a/relays/bin-substrate/src/chains/millau_messages_to_rialto_parachain.rs b/relays/bin-substrate/src/chains/millau_messages_to_rialto_parachain.rs index 5dbe1d69d13..8fedd22a40a 100644 --- a/relays/bin-substrate/src/chains/millau_messages_to_rialto_parachain.rs +++ b/relays/bin-substrate/src/chains/millau_messages_to_rialto_parachain.rs @@ -18,8 +18,9 @@ use relay_millau_client::Millau; use relay_rialto_parachain_client::RialtoParachain; -use substrate_relay_helper::messages_lane::{ - DirectReceiveMessagesDeliveryProofCallBuilder, SubstrateMessageLane, +use substrate_relay_helper::{ + messages_lane::{DirectReceiveMessagesDeliveryProofCallBuilder, SubstrateMessageLane}, + UtilityPalletBatchCallBuilder, }; substrate_relay_helper::generate_receive_message_proof_call_builder!( @@ -45,6 +46,6 @@ impl SubstrateMessageLane for MillauMessagesToRialtoParachain { millau_runtime::WithRialtoParachainMessagesInstance, >; - type SourceBatchCallBuilder = (); + type SourceBatchCallBuilder = UtilityPalletBatchCallBuilder; type TargetBatchCallBuilder = (); } diff --git a/relays/client-bridge-hub-rococo/src/lib.rs b/relays/client-bridge-hub-rococo/src/lib.rs index b6b844021e0..c8963f968d0 100644 --- a/relays/client-bridge-hub-rococo/src/lib.rs +++ b/relays/client-bridge-hub-rococo/src/lib.rs @@ -132,9 +132,6 @@ impl ChainWithMessages for BridgeHubRococo { bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - - // TODO: fix (https://github.com/paritytech/parity-bridges-common/issues/1640) - type WeightInfo = (); } #[cfg(test)] diff 
--git a/relays/client-bridge-hub-wococo/src/lib.rs b/relays/client-bridge-hub-wococo/src/lib.rs index a466df35495..7f23c0d93d2 100644 --- a/relays/client-bridge-hub-wococo/src/lib.rs +++ b/relays/client-bridge-hub-wococo/src/lib.rs @@ -132,9 +132,6 @@ impl ChainWithMessages for BridgeHubWococo { bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - - // TODO: fix (https://github.com/paritytech/parity-bridges-common/issues/1640) - type WeightInfo = (); } #[cfg(test)] diff --git a/relays/client-millau/src/lib.rs b/relays/client-millau/src/lib.rs index 4c4c1370a6a..9df21b1d503 100644 --- a/relays/client-millau/src/lib.rs +++ b/relays/client-millau/src/lib.rs @@ -51,7 +51,6 @@ impl ChainWithMessages for Millau { bp_millau::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = bp_millau::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); } impl Chain for Millau { diff --git a/relays/client-rialto-parachain/src/lib.rs b/relays/client-rialto-parachain/src/lib.rs index 2c3792725c3..654848da2a7 100644 --- a/relays/client-rialto-parachain/src/lib.rs +++ b/relays/client-rialto-parachain/src/lib.rs @@ -79,7 +79,6 @@ impl ChainWithMessages for RialtoParachain { bp_rialto_parachain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = bp_rialto_parachain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); } impl ChainWithTransactions for RialtoParachain { diff --git a/relays/client-rialto/src/lib.rs b/relays/client-rialto/src/lib.rs index 39cc2721ddb..b822dfe7d89 100644 --- a/relays/client-rialto/src/lib.rs +++ b/relays/client-rialto/src/lib.rs @@ -69,7 +69,6 @@ impl ChainWithMessages for Rialto { bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; const 
MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = bp_rialto::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); } impl ChainWithBalances for Rialto { diff --git a/relays/client-substrate/src/chain.rs b/relays/client-substrate/src/chain.rs index ebd9e7172d8..b55ae71cbf8 100644 --- a/relays/client-substrate/src/chain.rs +++ b/relays/client-substrate/src/chain.rs @@ -136,9 +136,6 @@ pub trait ChainWithMessages: Chain { /// Maximal number of unconfirmed messages in a single confirmation transaction at this /// `ChainWithMessages`. const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce; - - /// Weights of message pallet calls. - type WeightInfo: pallet_bridge_messages::WeightInfoExt; } /// Call type used by the chain. diff --git a/relays/client-substrate/src/client.rs b/relays/client-substrate/src/client.rs index 1a4aeb583cd..bddd53b39f8 100644 --- a/relays/client-substrate/src/client.rs +++ b/relays/client-substrate/src/client.rs @@ -21,7 +21,6 @@ use crate::{ rpc::{ SubstrateAuthorClient, SubstrateChainClient, SubstrateFinalityClient, SubstrateFrameSystemClient, SubstrateStateClient, SubstrateSystemClient, - SubstrateTransactionPaymentClient, }, transaction_stall_timeout, AccountKeyPairOf, ConnectionParams, Error, HashOf, HeaderIdOf, Result, SignParam, TransactionTracker, UnsignedTransaction, @@ -31,15 +30,16 @@ use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; use bp_runtime::{HeaderIdProvider, StorageDoubleMapKeyProvider, StorageMapKeyProvider}; use codec::{Decode, Encode}; +use frame_support::weights::Weight; use frame_system::AccountInfo; use futures::{SinkExt, StreamExt}; use jsonrpsee::{ core::DeserializeOwned, ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}, }; -use num_traits::{Bounded, Saturating, Zero}; +use num_traits::{Saturating, Zero}; use pallet_balances::AccountData; -use pallet_transaction_payment::InclusionFee; +use pallet_transaction_payment::RuntimeDispatchInfo; use 
relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; use sp_core::{ storage::{StorageData, StorageKey}, @@ -51,10 +51,11 @@ use sp_runtime::{ }; use sp_trie::StorageProof; use sp_version::RuntimeVersion; -use std::{convert::TryFrom, future::Future}; +use std::future::Future; const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; +const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; /// The difference between best block number and number of its ancestor, that is enough @@ -591,33 +592,24 @@ impl Client { .await } - /// Estimate fee that will be spent on given extrinsic. - pub async fn estimate_extrinsic_fee( + /// Returns weight of the given transaction. + pub async fn extimate_extrinsic_weight( &self, - transaction: Bytes, - ) -> Result> { + transaction: SignedTransaction, + ) -> Result { self.jsonrpsee_execute(move |client| async move { - let fee_details = - SubstrateTransactionPaymentClient::::fee_details(&*client, transaction, None) - .await?; - let inclusion_fee = fee_details - .inclusion_fee - .map(|inclusion_fee| InclusionFee { - base_fee: C::Balance::try_from(inclusion_fee.base_fee.into_u256()) - .unwrap_or_else(|_| C::Balance::max_value()), - len_fee: C::Balance::try_from(inclusion_fee.len_fee.into_u256()) - .unwrap_or_else(|_| C::Balance::max_value()), - adjusted_weight_fee: C::Balance::try_from( - inclusion_fee.adjusted_weight_fee.into_u256(), - ) - .unwrap_or_else(|_| C::Balance::max_value()), - }) - .unwrap_or_else(|| InclusionFee { - base_fee: Zero::zero(), - len_fee: Zero::zero(), - adjusted_weight_fee: Zero::zero(), - }); - Ok(inclusion_fee) + let transaction_len = transaction.encoded_size() as u32; + + let call = SUB_API_TX_PAYMENT_QUERY_INFO.to_string(); + let data = Bytes((transaction, transaction_len).encode()); + + let encoded_response = + 
SubstrateStateClient::::call(&*client, call, data, None).await?; + let dispatch_info = + RuntimeDispatchInfo::::decode(&mut &encoded_response.0[..]) + .map_err(Error::ResponseParseFailed)?; + + Ok(dispatch_info.weight) }) .await } diff --git a/relays/lib-substrate-relay/src/messages_lane.rs b/relays/lib-substrate-relay/src/messages_lane.rs index 1a4f65ff51e..0a7a3566d20 100644 --- a/relays/lib-substrate-relay/src/messages_lane.rs +++ b/relays/lib-substrate-relay/src/messages_lane.rs @@ -25,7 +25,9 @@ use crate::{ use async_std::sync::Arc; use bp_messages::{LaneId, MessageNonce}; -use bp_runtime::{AccountIdOf, Chain as _, HeaderIdOf, WeightExtraOps}; +use bp_runtime::{ + AccountIdOf, Chain as _, EncodedOrDecodedCall, HeaderIdOf, TransactionEra, WeightExtraOps, +}; use bridge_runtime_common::messages::{ source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, }; @@ -35,13 +37,15 @@ use messages_relay::{message_lane::MessageLane, message_lane_loop::BatchTransact use pallet_bridge_messages::{Call as BridgeMessagesCall, Config as BridgeMessagesConfig}; use relay_substrate_client::{ transaction_stall_timeout, AccountKeyPairOf, BalanceOf, BlockNumberOf, CallOf, Chain, - ChainWithMessages, ChainWithTransactions, Client, Error as SubstrateError, HashOf, + ChainWithMessages, ChainWithTransactions, Client, Error as SubstrateError, HashOf, SignParam, + UnsignedTransaction, }; use relay_utils::{ metrics::{GlobalMetrics, MetricsParams, StandaloneMetric}, STALL_TIMEOUT, }; use sp_core::Pair; +use sp_runtime::traits::Zero; use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; /// Substrate -> Substrate messages synchronization pipeline. 
@@ -159,25 +163,25 @@ where AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, { - let source_client = params.source_client; - let target_client = params.target_client; - let relayer_id_at_source: AccountIdOf = - params.source_transaction_params.signer.public().into(); - // 2/3 is reserved for proofs and tx overhead let max_messages_size_in_single_batch = P::TargetChain::max_extrinsic_size() / 3; // we don't know exact weights of the Polkadot runtime. So to guess weights we'll be using // weights from Rialto and then simply dividing it by x2. let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - crate::messages_lane::select_delivery_transaction_limits::< - ::WeightInfo, - >( + select_delivery_transaction_limits_rpc::

( + ¶ms, P::TargetChain::max_extrinsic_weight(), P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - ); + ) + .await?; let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = (max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2); + let source_client = params.source_client; + let target_client = params.target_client; + let relayer_id_at_source: AccountIdOf = + params.source_transaction_params.signer.public().into(); + log::info!( target: "bridge", "Starting {} -> {} messages relay.\n\t\ @@ -437,12 +441,15 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { }; } -/// Returns maximal number of messages and their maximal cumulative dispatch weight, based -/// on given chain parameters. -pub fn select_delivery_transaction_limits( +/// Returns maximal number of messages and their maximal cumulative dispatch weight. +async fn select_delivery_transaction_limits_rpc( + params: &MessagesRelayParams

, max_extrinsic_weight: Weight, max_unconfirmed_messages_at_inbound_lane: MessageNonce, -) -> (MessageNonce, Weight) { +) -> anyhow::Result<(MessageNonce, Weight)> +where + AccountIdOf: From< as Pair>::Public>, +{ // We may try to guess accurate value, based on maximal number of messages and per-message // weight overhead, but the relay loop isn't using this info in a super-accurate way anyway. // So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is @@ -455,13 +462,35 @@ pub fn select_delivery_transaction_limits(params, 0)?; + let delivery_tx_with_zero_messages_weight = params + .target_client + .extimate_extrinsic_weight(delivery_tx_with_zero_messages) + .await + .map_err(|e| { + anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) + })?; + + // weight of single message delivery with outbound lane state + let delivery_tx_with_one_message = dummy_messages_delivery_transaction::

(params, 1)?; + let delivery_tx_with_one_message_weight = params + .target_client + .extimate_extrinsic_weight(delivery_tx_with_one_message) + .await + .map_err(|e| { + anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) + })?; + + // message overhead is roughly `delivery_tx_with_one_message_weight - + // delivery_tx_with_zero_messages_weight` + let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_with_zero_messages_weight; + let delivery_tx_message_overhead = + delivery_tx_with_one_message_weight.saturating_sub(delivery_tx_with_zero_messages_weight); let max_number_of_messages = std::cmp::min( delivery_tx_weight_rest - .min_components_checked_div(W::receive_messages_proof_messages_overhead(1)) + .min_components_checked_div(delivery_tx_message_overhead) .unwrap_or(u64::MAX), max_unconfirmed_messages_at_inbound_lane, ); @@ -475,36 +504,58 @@ pub fn select_delivery_transaction_limits; - - #[test] - fn select_delivery_transaction_limits_is_sane() { - // we want to check the `proof_size` component here too. But for Rialto and Millau - // it is set to `u64::MAX` (as for Polkadot and other relay/standalone chains). - // So let's use RialtoParachain limits here - it has `proof_size` limit as all - // Cumulus-based parachains do. - let (max_count, max_weight) = - select_delivery_transaction_limits::( - bp_rialto_parachain::RialtoParachain::max_extrinsic_weight(), - bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - ); - assert_eq!( - (max_count, max_weight), - // We don't actually care about these values, so feel free to update them whenever test - // fails. The only thing to do before that is to ensure that new values looks sane: - // i.e. weight reserved for messages dispatch allows dispatch of non-trivial messages. - // - // Any significant change in this values should attract additional attention. 
- (1024, Weight::from_parts(866_600_106_667, 2_271_915)), +/// Returns dummy message delivery transaction with zero messages and `1kb` proof. +fn dummy_messages_delivery_transaction( + params: &MessagesRelayParams

, + messages: u32, +) -> anyhow::Result<::SignedTransaction> +where + AccountIdOf: From< as Pair>::Public>, +{ + // we don't care about any call values here, because all that the estimation RPC does + // is calls `GetDispatchInfo::get_dispatch_info` for the wrapped call. So we only are + // interested in values that affect call weight - e.g. number of messages and the + // storage proof size + + let dummy_messages_delivery_call = + P::ReceiveMessagesProofCallBuilder::build_receive_messages_proof_call( + params.source_transaction_params.signer.public().into(), + ( + Weight::zero(), + FromBridgedChainMessagesProof { + bridged_header_hash: Default::default(), + // we may use per-chain `EXTRA_STORAGE_PROOF_SIZE`, but since we don't need + // exact values, this global estimation is fine + storage_proof: vec![vec![ + 42u8; + pallet_bridge_messages::EXTRA_STORAGE_PROOF_SIZE + as usize + ]], + lane: Default::default(), + nonces_start: 1, + nonces_end: messages as u64, + }, + ), + messages, + Weight::zero(), + false, ); - } + P::TargetChain::sign_transaction( + SignParam { + spec_version: 0, + transaction_version: 0, + genesis_hash: Default::default(), + signer: params.target_transaction_params.signer.clone(), + }, + UnsignedTransaction { + call: EncodedOrDecodedCall::Decoded(dummy_messages_delivery_call), + nonce: Zero::zero(), + tip: Zero::zero(), + era: TransactionEra::Immortal, + }, + ) + .map_err(Into::into) } diff --git a/relays/messages/src/message_race_loop.rs b/relays/messages/src/message_race_loop.rs index 2988ab231d9..3d995a0a362 100644 --- a/relays/messages/src/message_race_loop.rs +++ b/relays/messages/src/message_race_loop.rs @@ -573,17 +573,14 @@ pub async fn run, TC: TargetClient

>( if let Some((at_block, nonces_range, proof)) = race_state.nonces_to_submit.as_ref() { log::debug!( target: "bridge", - "Going to submit proof of messages in range {:?} to {} node", + "Going to submit proof of messages in range {:?} to {} node{}", nonces_range, P::target_name(), + target_batch_transaction.as_ref().map(|tx| format!( + ". This transaction is batched with sending the proof for header {:?}.", + tx.required_header_id()) + ).unwrap_or_default(), ); - if let Some(ref target_batch_transaction) = target_batch_transaction { - log::debug!( - target: "bridge", - "This transaction is batched with sending the proof for header {:?}.", - target_batch_transaction.required_header_id(), - ); - } target_submit_proof.set( race_target