Use LOG_TARGET in consensus related crates (paritytech#12875)
* Use shared LOG_TARGET in consensus related crates
* Rename target from "afg" to "grandpa"
davxy authored and ltfschoen committed Feb 22, 2023
1 parent c81565a commit 580f873
Showing 31 changed files with 343 additions and 247 deletions.
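
The change is mechanical: each consensus crate now defines a single `LOG_TARGET` constant (e.g. `"aura"`, `"babe"`) in its `lib.rs` and passes it to the `log` macros instead of repeating the string literal at every call site. Below is a minimal standalone sketch of that convention; it is illustrative only — the helper function and `main` harness are invented for the example, and `env_logger` is assumed purely so the snippet runs.

```rust
// Illustrative sketch of the shared log-target convention (not code from this
// diff). Assumed dependencies: `log` and, only so the example runs, `env_logger`.
use log::{debug, info};

/// One crate-wide constant instead of a string literal at every call site.
/// In the real crates this lives in `lib.rs` (e.g. "aura", "babe").
const LOG_TARGET: &str = "aura";

/// Hypothetical helper standing in for a consensus routine that logs progress.
fn claim_slot(slot: u64) -> Option<u64> {
    // Every macro invocation references the constant, so renaming a target
    // (as this commit does for "afg" -> "grandpa") is a one-line change.
    debug!(target: LOG_TARGET, "Attempting to claim slot {}", slot);
    Some(slot)
}

fn main() {
    env_logger::init();
    if let Some(slot) = claim_slot(42) {
        info!(target: LOG_TARGET, "Claimed slot {}", slot);
    }
}
```

One practical side effect of the "afg" → "grandpa" rename (applied in the GRANDPA crates, which are among the 31 changed files but not all shown here): any log filter keyed on the old target name — for instance a `-l`/`--log` pattern such as `afg=trace`, if that is how logs were being filtered — would need to reference `grandpa` instead.
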
7 changes: 4 additions & 3 deletions client/consensus/aura/src/import_queue.rs
@@ -20,6 +20,7 @@

use crate::{
aura_err, authorities, find_pre_digest, slot_author, AuthorityId, CompatibilityMode, Error,
LOG_TARGET,
};
use codec::{Codec, Decode, Encode};
use log::{debug, info, trace};
@@ -88,7 +89,7 @@ where
.map_err(Error::Client)?
{
info!(
target: "aura",
target: LOG_TARGET,
"Slot author is equivocating at slot {} with headers {:?} and {:?}",
slot,
equivocation_proof.first_header.hash(),
@@ -256,7 +257,7 @@ where
block.body = Some(inner_body);
}

trace!(target: "aura", "Checked {:?}; importing.", pre_header);
trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header);
telemetry!(
self.telemetry;
CONSENSUS_TRACE;
@@ -272,7 +273,7 @@
Ok((block, None))
},
CheckedHeader::Deferred(a, b) => {
debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b);
debug!(target: LOG_TARGET, "Checking {:?} failed; {:?}, {:?}.", hash, a, b);
telemetry!(
self.telemetry;
CONSENSUS_DEBUG;
8 changes: 5 additions & 3 deletions client/consensus/aura/src/lib.rs
@@ -72,6 +72,8 @@ pub use sp_consensus_aura::{
AuraApi, ConsensusLog, SlotDuration, AURA_ENGINE_ID,
};

const LOG_TARGET: &str = "aura";

type AuthorityId<P> = <P as Pair>::Public;

/// Run `AURA` in a compatibility mode.
@@ -530,7 +532,7 @@
}

fn aura_err<B: BlockT>(error: Error<B>) -> Error<B> {
debug!(target: "aura", "{}", error);
debug!(target: LOG_TARGET, "{}", error);
error
}

@@ -580,10 +582,10 @@ pub fn find_pre_digest<B: BlockT, Signature: Codec>(header: &B::Header) -> Resul

let mut pre_digest: Option<Slot> = None;
for log in header.digest().logs() {
trace!(target: "aura", "Checking log {:?}", log);
trace!(target: LOG_TARGET, "Checking log {:?}", log);
match (CompatibleDigestItem::<Signature>::as_aura_pre_digest(log), pre_digest.is_some()) {
(Some(_), true) => return Err(aura_err(Error::MultipleHeaders)),
(None, _) => trace!(target: "aura", "Ignoring digest not meant for us"),
(None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
(s, false) => pre_digest = s,
}
}
4 changes: 2 additions & 2 deletions client/consensus/babe/src/aux_schema.rs
@@ -21,7 +21,7 @@
use codec::{Decode, Encode};
use log::info;

use crate::{migration::EpochV0, Epoch};
use crate::{migration::EpochV0, Epoch, LOG_TARGET};
use sc_client_api::backend::AuxStore;
use sc_consensus_epochs::{
migration::{EpochChangesV0For, EpochChangesV1For},
@@ -82,7 +82,7 @@ pub fn load_epoch_changes<Block: BlockT, B: AuxStore>(
let epoch_changes =
SharedEpochChanges::<Block, Epoch>::new(maybe_epoch_changes.unwrap_or_else(|| {
info!(
target: "babe",
target: LOG_TARGET,
"👶 Creating empty BABE epoch changes on what appears to be first startup.",
);
EpochChangesFor::<Block, Epoch>::default()
86 changes: 48 additions & 38 deletions client/consensus/babe/src/lib.rs
@@ -149,6 +149,8 @@ pub mod aux_schema;
#[cfg(test)]
mod tests;

const LOG_TARGET: &str = "babe";

/// BABE epoch information
#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)]
pub struct Epoch {
@@ -323,7 +325,7 @@ impl<B: BlockT> From<Error<B>> for String {
}

fn babe_err<B: BlockT>(error: Error<B>) -> Error<B> {
debug!(target: "babe", "{}", error);
debug!(target: LOG_TARGET, "{}", error);
error
}

@@ -345,7 +347,7 @@ where
let block_id = if client.usage_info().chain.finalized_state.is_some() {
BlockId::Hash(client.usage_info().chain.best_hash)
} else {
debug!(target: "babe", "No finalized state is available. Reading config from genesis");
debug!(target: LOG_TARGET, "No finalized state is available. Reading config from genesis");
BlockId::Hash(client.usage_info().chain.genesis_hash)
};

@@ -486,7 +488,7 @@ where
telemetry,
};

info!(target: "babe", "👶 Starting BABE Authorship worker");
info!(target: LOG_TARGET, "👶 Starting BABE Authorship worker");

let slot_worker = sc_consensus_slots::start_slot_worker(
babe_link.config.slot_duration(),
@@ -523,12 +525,8 @@ fn aux_storage_cleanup<C: HeaderMetadata<Block> + HeaderBackend<Block>, Block: B
Ok(meta) => {
hashes.insert(meta.parent);
},
Err(err) => warn!(
target: "babe",
"Failed to lookup metadata for block `{:?}`: {}",
first,
err,
),
Err(err) =>
warn!(target: LOG_TARGET, "Failed to lookup metadata for block `{:?}`: {}", first, err,),
}

// Cleans data for finalized block's ancestors
@@ -716,7 +714,7 @@ where
type AuxData = ViableEpochDescriptor<B::Hash, NumberFor<B>, Epoch>;

fn logging_target(&self) -> &'static str {
"babe"
LOG_TARGET
}

fn block_import(&mut self) -> &mut Self::BlockImport {
@@ -749,7 +747,7 @@ where
slot: Slot,
epoch_descriptor: &ViableEpochDescriptor<B::Hash, NumberFor<B>, Epoch>,
) -> Option<Self::Claim> {
debug!(target: "babe", "Attempting to claim slot {}", slot);
debug!(target: LOG_TARGET, "Attempting to claim slot {}", slot);
let s = authorship::claim_slot(
slot,
self.epoch_changes
@@ -760,7 +758,7 @@
);

if s.is_some() {
debug!(target: "babe", "Claimed slot {}", slot);
debug!(target: LOG_TARGET, "Claimed slot {}", slot);
}

s
@@ -777,7 +775,7 @@ where
Ok(()) => true,
Err(e) =>
if e.is_full() {
warn!(target: "babe", "Trying to notify a slot but the channel is full");
warn!(target: LOG_TARGET, "Trying to notify a slot but the channel is full");
true
} else {
false
@@ -904,10 +902,10 @@ pub fn find_pre_digest<B: BlockT>(header: &B::Header) -> Result<PreDigest, Error

let mut pre_digest: Option<_> = None;
for log in header.digest().logs() {
trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log);
trace!(target: LOG_TARGET, "Checking log {:?}, looking for pre runtime digest", log);
match (log.as_babe_pre_digest(), pre_digest.is_some()) {
(Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)),
(None, _) => trace!(target: "babe", "Ignoring digest not meant for us"),
(None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
(s, false) => pre_digest = s,
}
}
@@ -920,13 +918,13 @@ fn find_next_epoch_digest<B: BlockT>(
) -> Result<Option<NextEpochDescriptor>, Error<B>> {
let mut epoch_digest: Option<_> = None;
for log in header.digest().logs() {
trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log);
trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log);
let log = log.try_to::<ConsensusLog>(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID));
match (log, epoch_digest.is_some()) {
(Some(ConsensusLog::NextEpochData(_)), true) =>
return Err(babe_err(Error::MultipleEpochChangeDigests)),
(Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch),
_ => trace!(target: "babe", "Ignoring digest not meant for us"),
_ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
}
}

@@ -939,13 +937,13 @@ fn find_next_config_digest<B: BlockT>(
) -> Result<Option<NextConfigDescriptor>, Error<B>> {
let mut config_digest: Option<_> = None;
for log in header.digest().logs() {
trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log);
trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log);
let log = log.try_to::<ConsensusLog>(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID));
match (log, config_digest.is_some()) {
(Some(ConsensusLog::NextConfigData(_)), true) =>
return Err(babe_err(Error::MultipleConfigChangeDigests)),
(Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config),
_ => trace!(target: "babe", "Ignoring digest not meant for us"),
_ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
}
}

@@ -1075,7 +1073,7 @@ where
None => match generate_key_owner_proof(&best_id)? {
Some(proof) => proof,
None => {
debug!(target: "babe", "Equivocation offender is not part of the authority set.");
debug!(
target: LOG_TARGET,
"Equivocation offender is not part of the authority set."
);
return Ok(())
},
},
@@ -1091,7 +1092,7 @@ where
)
.map_err(Error::RuntimeApi)?;

info!(target: "babe", "Submitted equivocation report for author {:?}", author);
info!(target: LOG_TARGET, "Submitted equivocation report for author {:?}", author);

Ok(())
}
@@ -1121,7 +1122,7 @@ where
mut block: BlockImportParams<Block, ()>,
) -> BlockVerificationResult<Block> {
trace!(
target: "babe",
target: LOG_TARGET,
"Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}",
block.origin,
block.header,
@@ -1140,7 +1141,11 @@ where
return Ok((block, Default::default()))
}

debug!(target: "babe", "We have {:?} logs in this header", block.header.digest().logs().len());
debug!(
target: LOG_TARGET,
"We have {:?} logs in this header",
block.header.digest().logs().len()
);

let create_inherent_data_providers = self
.create_inherent_data_providers
@@ -1204,7 +1209,10 @@ where
)
.await
{
warn!(target: "babe", "Error checking/reporting BABE equivocation: {}", err);
warn!(
target: LOG_TARGET,
"Error checking/reporting BABE equivocation: {}", err
);
}

if let Some(inner_body) = block.body {
@@ -1233,7 +1241,7 @@ where
block.body = Some(inner_body);
}

trace!(target: "babe", "Checked {:?}; importing.", pre_header);
trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header);
telemetry!(
self.telemetry;
CONSENSUS_TRACE;
@@ -1252,7 +1260,7 @@ where
Ok((block, Default::default()))
},
CheckedHeader::Deferred(a, b) => {
debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b);
debug!(target: LOG_TARGET, "Checking {:?} failed; {:?}, {:?}.", hash, a, b);
telemetry!(
self.telemetry;
CONSENSUS_DEBUG;
@@ -1520,21 +1528,23 @@ where
log::Level::Info
};

log!(target: "babe",
log_level,
"👶 New epoch {} launching at block {} (block slot {} >= start slot {}).",
viable_epoch.as_ref().epoch_index,
hash,
slot,
viable_epoch.as_ref().start_slot,
log!(
target: LOG_TARGET,
log_level,
"👶 New epoch {} launching at block {} (block slot {} >= start slot {}).",
viable_epoch.as_ref().epoch_index,
hash,
slot,
viable_epoch.as_ref().start_slot,
);

let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config));

log!(target: "babe",
log_level,
"👶 Next epoch starts at slot {}",
next_epoch.as_ref().start_slot,
log!(
target: LOG_TARGET,
log_level,
"👶 Next epoch starts at slot {}",
next_epoch.as_ref().start_slot,
);

// prune the tree of epochs not part of the finalized chain or
@@ -1565,7 +1575,7 @@ where
};

if let Err(e) = prune_and_import() {
debug!(target: "babe", "Failed to launch next epoch: {}", e);
debug!(target: LOG_TARGET, "Failed to launch next epoch: {}", e);
*epoch_changes =
old_epoch_changes.expect("set `Some` above and not taken; qed");
return Err(e)
10 changes: 5 additions & 5 deletions client/consensus/babe/src/tests.rs
@@ -323,7 +323,7 @@ impl TestNetFactory for BabeTestNet {
use substrate_test_runtime_client::DefaultTestClientBuilderExt;

let client = client.as_client();
trace!(target: "babe", "Creating a verifier");
trace!(target: LOG_TARGET, "Creating a verifier");

// ensure block import and verifier are linked correctly.
let data = maybe_link
@@ -352,12 +352,12 @@ impl TestNetFactory for BabeTestNet {
}

fn peer(&mut self, i: usize) -> &mut BabePeer {
trace!(target: "babe", "Retrieving a peer");
trace!(target: LOG_TARGET, "Retrieving a peer");
&mut self.peers[i]
}

fn peers(&self) -> &Vec<BabePeer> {
trace!(target: "babe", "Retrieving peers");
trace!(target: LOG_TARGET, "Retrieving peers");
&self.peers
}

@@ -583,7 +583,7 @@ fn can_author_block() {
// with secondary slots enabled it should never be empty
match claim_slot(i.into(), &epoch, &keystore) {
None => i += 1,
Some(s) => debug!(target: "babe", "Authored block {:?}", s.0),
Some(s) => debug!(target: LOG_TARGET, "Authored block {:?}", s.0),
}

// otherwise with only vrf-based primary slots we might need to try a couple
@@ -593,7 +593,7 @@
match claim_slot(i.into(), &epoch, &keystore) {
None => i += 1,
Some(s) => {
debug!(target: "babe", "Authored block {:?}", s.0);
debug!(target: LOG_TARGET, "Authored block {:?}", s.0);
break
},
}