diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs
index 06a4f0b24bb0..14fc87761c42 100644
--- a/node/core/approval-voting/src/lib.rs
+++ b/node/core/approval-voting/src/lib.rs
@@ -1327,6 +1327,11 @@ async fn get_approval_signatures_for_candidate(
 
     // No need to block subsystem on this (also required to break cycle).
     // We should not be sending this message frequently - caller must make sure this is bounded.
+    gum::trace!(
+        target: LOG_TARGET,
+        ?candidate_hash,
+        "Spawning task for fetching signatures from approval-distribution"
+    );
     ctx.spawn("get-approval-signatures", Box::pin(get_approvals))
 }
 
diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs
index 1313eb600052..245af523685f 100644
--- a/node/core/dispute-coordinator/src/initialized.rs
+++ b/node/core/dispute-coordinator/src/initialized.rs
@@ -807,7 +807,14 @@ impl Initialized {
                     );
                     intermediate_result
                 },
-                Ok(votes) => intermediate_result.import_approval_votes(&env, votes, now),
+                Ok(votes) => {
+                    gum::trace!(
+                        target: LOG_TARGET,
+                        count = votes.len(),
+                        "Successfully received approval votes."
+                    );
+                    intermediate_result.import_approval_votes(&env, votes, now)
+                },
             }
         } else {
             gum::trace!(
diff --git a/node/network/dispute-distribution/src/sender/mod.rs b/node/network/dispute-distribution/src/sender/mod.rs
index c0cb7c1c0acc..a54033945d6f 100644
--- a/node/network/dispute-distribution/src/sender/mod.rs
+++ b/node/network/dispute-distribution/src/sender/mod.rs
@@ -195,7 +195,7 @@ impl DisputeSender {
         // recovered at startup will be relatively "old" anyway and we assume that no more than a
         // third of the validators will go offline at any point in time anyway.
         for dispute in unknown_disputes {
-            self.rate_limit.limit("while going through unknown disputes", dispute.1).await;
+            // Rate limiting handled inside `start_send_for_dispute` (calls `start_sender`).
             self.start_send_for_dispute(ctx, runtime, dispute).await?;
         }
         Ok(())
@@ -389,6 +389,7 @@ impl RateLimit {
     /// String given as occasion and candidate hash are logged in case the rate limit hit.
     async fn limit(&mut self, occasion: &'static str, candidate_hash: CandidateHash) {
         // Wait for rate limit and add some logging:
+        let mut num_wakes: u32 = 0;
         poll_fn(|cx| {
             let old_limit = Pin::new(&mut self.limit);
             match old_limit.poll(cx) {
@@ -397,8 +398,10 @@ impl RateLimit {
                         target: LOG_TARGET,
                         ?occasion,
                         ?candidate_hash,
+                        ?num_wakes,
                         "Sending rate limit hit, slowing down requests"
                     );
+                    num_wakes += 1;
                     Poll::Pending
                 },
                 Poll::Ready(()) => Poll::Ready(()),
diff --git a/node/network/dispute-distribution/src/sender/send_task.rs b/node/network/dispute-distribution/src/sender/send_task.rs
index 89b5c099bde9..3852adbc141b 100644
--- a/node/network/dispute-distribution/src/sender/send_task.rs
+++ b/node/network/dispute-distribution/src/sender/send_task.rs
@@ -140,21 +140,38 @@ impl SendTask {
         let new_authorities = self.get_relevant_validators(ctx, runtime, active_sessions).await?;
 
         // Note this will also contain all authorities for which sending failed previously:
-        let add_authorities = new_authorities
+        let add_authorities: Vec<_> = new_authorities
             .iter()
             .filter(|a| !self.deliveries.contains_key(a))
             .map(Clone::clone)
             .collect();
 
         // Get rid of dead/irrelevant tasks/statuses:
+        gum::trace!(
+            target: LOG_TARGET,
+            already_running_deliveries = ?self.deliveries.len(),
+            "Cleaning up deliveries"
+        );
         self.deliveries.retain(|k, _| new_authorities.contains(k));
 
         // Start any new tasks that are needed:
+        gum::trace!(
+            target: LOG_TARGET,
+            new_and_failed_authorities = ?add_authorities.len(),
+            overall_authority_set_size = ?new_authorities.len(),
+            already_running_deliveries = ?self.deliveries.len(),
+            "Starting new send requests for authorities."
+        );
         let new_statuses =
             send_requests(ctx, self.tx.clone(), add_authorities, self.request.clone(), metrics)
                 .await?;
 
         let was_empty = new_statuses.is_empty();
+        gum::trace!(
+            target: LOG_TARGET,
+            sent_requests = ?new_statuses.len(),
+            "Requests dispatched."
+        );
 
         self.has_failed_sends = false;
         self.deliveries.extend(new_statuses.into_iter());
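
For readers unfamiliar with the `poll_fn` pattern touched in `RateLimit::limit` above, below is a minimal, self-contained sketch of the wake-counting idea behind `num_wakes`. It is an illustrative assumption, not the subsystem's actual code: the simplified `RateLimit`, the missing `CandidateHash` parameter, the `println!` in place of `gum` logging, and the `futures`/`futures-timer`/`block_on` scaffolding are all stand-ins.

// Sketch (assumes the `futures` and `futures-timer` crates): count how often the
// rate-limiting future reports `Pending` before it resolves.
use std::{future::Future, pin::Pin, task::Poll, time::Duration};

use futures::future::poll_fn;
use futures_timer::Delay;

/// Simplified stand-in for the sender's rate limiter: a single timer that must
/// elapse before the next batch of requests may be sent.
struct RateLimit {
    limit: Delay,
}

impl RateLimit {
    fn new(interval: Duration) -> Self {
        Self { limit: Delay::new(interval) }
    }

    /// Wait until the timer elapses, reporting every poll that still comes back
    /// `Pending` together with a running wake counter.
    async fn limit(&mut self, occasion: &'static str) {
        let mut num_wakes: u32 = 0;
        poll_fn(|cx| {
            let timer = Pin::new(&mut self.limit);
            match timer.poll(cx) {
                Poll::Pending => {
                    println!("{occasion}: rate limit hit, wake #{num_wakes}");
                    num_wakes += 1;
                    Poll::Pending
                },
                Poll::Ready(()) => Poll::Ready(()),
            }
        })
        .await;
    }
}

fn main() {
    futures::executor::block_on(async {
        let mut rate_limit = RateLimit::new(Duration::from_millis(100));
        rate_limit.limit("demo request batch").await;
        println!("rate limit elapsed, batch may be sent");
    });
}

The counter makes repeated `Poll::Pending` wake-ups visible over the lifetime of a single `limit` call, which is exactly what the added `?num_wakes` field surfaces in the real log line of the diff.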