diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 248f79dd59e..ef6d409affd 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -19,7 +19,6 @@ extern crate core; #[cfg(not(feature = "std"))] extern crate alloc; -#[macro_use] extern crate lightning; extern crate lightning_rapid_gossip_sync; use lightning::chain; @@ -37,6 +36,7 @@ use lightning::routing::router::Router; use lightning::routing::scoring::{Score, WriteableScore}; use lightning::util::logger::Logger; use lightning::util::persist::Persister; +use lightning::{_log_error as log_error, _log_trace as log_trace}; #[cfg(feature = "std")] use lightning::util::wakers::Sleeper; use lightning_rapid_gossip_sync::RapidGossipSync; diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs index d8e7bf12726..13a3e7a74e4 100644 --- a/lightning-invoice/src/utils.rs +++ b/lightning-invoice/src/utils.rs @@ -15,6 +15,7 @@ use lightning::ln::inbound_payment::{create, create_from_hash, ExpandedKey}; use lightning::routing::gossip::RoutingFees; use lightning::routing::router::{RouteHint, RouteHintHop, Router}; use lightning::util::logger::Logger; +use lightning::_log_trace_with_context as log_trace; use secp256k1::PublicKey; use core::ops::Deref; use core::time::Duration; @@ -189,8 +190,7 @@ where ) .map_err(|_| SignOrCreationError::CreationError(CreationError::InvalidAmount))? }; - - log_trace!(logger, "Creating phantom invoice from {} participating nodes with payment hash {}", + log_trace!(logger, None, None, "Creating phantom invoice from {} participating nodes with payment hash {}", phantom_route_hints.len(), log_bytes!(payment_hash.0)); let mut invoice = invoice @@ -240,7 +240,7 @@ where let mut phantom_hints: Vec<_> = Vec::new(); for PhantomRouteHints { channels, phantom_scid, real_node_pubkey } in phantom_route_hints { - log_trace!(logger, "Generating phantom route hints for node {}", + log_trace!(logger, None, None, "Generating phantom route hints for node {}", log_pubkey!(real_node_pubkey)); let route_hints = sort_and_filter_channels(channels, amt_msat, &logger); @@ -534,7 +534,7 @@ fn _create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_has return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort)); } - log_trace!(logger, "Creating invoice with payment hash {}", log_bytes!(payment_hash.0)); + log_trace!(logger, None, None, "Creating invoice with payment hash {}", log_bytes!(payment_hash.0)); let invoice = match description { InvoiceDescription::Direct(description) => { @@ -624,10 +624,10 @@ where htlc_maximum_msat: channel.inbound_htlc_maximum_msat,}]) }; - log_trace!(logger, "Considering {} channels for invoice route hints", channels.len()); + log_trace!(logger, None, None, "Considering {} channels for invoice route hints", channels.len()); for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) { if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() { - log_trace!(logger, "Ignoring channel {} for invoice route hints", log_bytes!(channel.channel_id)); + log_trace!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Ignoring channel {} for invoice route hints", log_bytes!(channel.channel_id)); continue; } @@ -640,7 +640,7 @@ where } else { // If any public channel exists, return no hints and let the sender // look at the public channels instead. 
- log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}", + log_trace!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Not including channels in invoice route hints on account of public channel {}", log_bytes!(channel.channel_id)); return vec![].into_iter().take(MAX_CHANNEL_HINTS).map(route_hint_from_channel); } @@ -648,7 +648,7 @@ where if channel.inbound_capacity_msat >= min_inbound_capacity { if !min_capacity_channel_exists { - log_trace!(logger, "Channel with enough inbound capacity exists for invoice route hints"); + log_trace!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Channel with enough inbound capacity exists for invoice route hints"); min_capacity_channel_exists = true; } @@ -658,7 +658,7 @@ where } if channel.is_usable && !online_channel_exists { - log_trace!(logger, "Channel with connected peer exists for invoice route hints"); + log_trace!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Channel with connected peer exists for invoice route hints"); online_channel_exists = true; } @@ -678,7 +678,7 @@ where let new_channel_preferable = channel.is_public == entry.get().is_public && !prefer_current; if new_now_public || new_channel_preferable { - log_trace!(logger, + log_trace!(logger, None, None, "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), log_bytes!(channel.channel_id), channel.short_channel_id, @@ -687,7 +687,7 @@ where current_max_capacity); entry.insert(channel); } else { - log_trace!(logger, + log_trace!(logger, None, None, "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), log_bytes!(entry.get().channel_id), entry.get().short_channel_id, @@ -730,14 +730,14 @@ where } else { true }; if include_channel { - log_trace!(logger, "Including channel {} in invoice route hints", + log_trace!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Including channel {} in invoice route hints", log_bytes!(channel.channel_id)); } else if !has_enough_capacity { - log_trace!(logger, "Ignoring channel {} without enough capacity for invoice route hints", + log_trace!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Ignoring channel {} without enough capacity for invoice route hints", log_bytes!(channel.channel_id)); } else { debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_public)); - log_trace!(logger, "Ignoring channel {} with disconnected peer", + log_trace!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Ignoring channel {} with disconnected peer", log_bytes!(channel.channel_id)); } diff --git a/lightning-rapid-gossip-sync/src/processing.rs b/lightning-rapid-gossip-sync/src/processing.rs index fca318a5fa5..d9e25882eae 100644 --- a/lightning-rapid-gossip-sync/src/processing.rs +++ b/lightning-rapid-gossip-sync/src/processing.rs @@ -10,7 +10,8 @@ use lightning::ln::msgs::{ }; use lightning::routing::gossip::NetworkGraph; use lightning::util::logger::Logger; -use lightning::{log_debug, log_warn, log_trace, log_given_level, log_gossip}; + +use lightning::{_log_debug as log_debug, _log_trace as log_trace, _log_warn as log_warn, _log_gossip as log_gossip, _log_given_level as log_given_level}; use lightning::util::ser::{BigSize, Readable}; use lightning::io; diff --git 
a/lightning-transaction-sync/src/esplora.rs b/lightning-transaction-sync/src/esplora.rs index f68be08a5a6..7dfcd4afe7e 100644 --- a/lightning-transaction-sync/src/esplora.rs +++ b/lightning-transaction-sync/src/esplora.rs @@ -2,7 +2,7 @@ use crate::error::{TxSyncError, InternalError}; use crate::common::{SyncState, FilterQueue, ConfirmedTx}; use lightning::util::logger::Logger; -use lightning::{log_error, log_info, log_debug, log_trace}; +use lightning::{_log_info as log_info, _log_debug as log_debug, _log_error as log_error, _log_trace as log_trace}; use lightning::chain::WatchedOutput; use lightning::chain::{Confirm, Filter}; diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 261e5593b5b..c5cf83d29ba 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -46,6 +46,8 @@ use core::ops::Deref; use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use bitcoin::secp256k1::PublicKey; +use crate::util::macro_logger::default_logging::*; + #[derive(Clone, Copy, Hash, PartialEq, Eq)] /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents /// entirely opaque. diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index a48f169a4d5..97be819188a 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -61,6 +61,7 @@ use crate::io::{self, Error}; use core::convert::TryInto; use core::ops::Deref; use crate::sync::{Mutex, LockTestExt}; +use crate::util::macro_logger::context_logging::*; /// An update generated by the underlying channel itself which contains some new information the /// [`ChannelMonitor`] should be made aware of. @@ -2039,7 +2040,7 @@ macro_rules!
fail_unbroadcast_htlcs { commitment_tx_output_idx: None, }, }; - log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, waiting for confirmation (at height {})", + log_trace!($logger, None, None, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, $commitment_tx_type, $commitment_txid_confirmed, entry.confirmation_threshold()); $self.onchain_events_awaiting_threshold_conf.push(entry); @@ -2153,7 +2154,7 @@ impl ChannelMonitorImpl { self.counterparty_hash_commitment_number.insert(htlc.payment_hash, commitment_number); } - log_trace!(logger, "Tracking new counterparty commitment transaction with txid {} at commitment number {} with {} HTLC outputs", txid, commitment_number, htlc_outputs.len()); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Tracking new counterparty commitment transaction with txid {} at commitment number {} with {} HTLC outputs", txid, commitment_number, htlc_outputs.len()); self.prev_counterparty_commitment_txid = self.current_counterparty_commitment_txid.take(); self.current_counterparty_commitment_txid = Some(txid); self.counterparty_claimable_outpoints.insert(txid, htlc_outputs.clone()); @@ -2330,7 +2331,7 @@ impl ChannelMonitorImpl { let commit_txs = self.get_latest_holder_commitment_txn(logger); let mut txs = vec![]; for tx in commit_txs.iter() { - log_info!(logger, "Broadcasting local {}", log_tx!(tx)); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Broadcasting local {}", log_tx!(tx)); txs.push(tx); } broadcaster.broadcast_transactions(&txs); @@ -2343,13 +2344,13 @@ impl ChannelMonitorImpl { L::Target: Logger, { if self.latest_update_id == CLOSED_CHANNEL_UPDATE_ID && updates.update_id == CLOSED_CHANNEL_UPDATE_ID { - log_info!(logger, "Applying post-force-closed update to monitor {} with {} change(s).", + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Applying post-force-closed update to monitor {} with {} change(s).", log_funding_info!(self), updates.updates.len()); } else if updates.update_id == CLOSED_CHANNEL_UPDATE_ID { - log_info!(logger, "Applying force close update to monitor {} with {} change(s).", + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Applying force close update to monitor {} with {} change(s).", log_funding_info!(self), updates.updates.len()); } else { - log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).", + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).", log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len()); } // ChannelMonitor updates may be applied after force close if we receive a preimage for a @@ -2369,7 +2370,7 @@ impl ChannelMonitorImpl { ChannelMonitorUpdateStep::PaymentPreimage { .. 
} => debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID), _ => { - log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name()); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name()); panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage"); }, } @@ -2381,32 +2382,32 @@ impl ChannelMonitorImpl { for update in updates.updates.iter() { match update { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => { - log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info"); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Updating ChannelMonitor with latest holder commitment transaction info"); if self.lockdown_from_offchain { panic!(); } if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone(), &claimed_htlcs, nondust_htlc_sources.clone()) { - log_error!(logger, "Providing latest holder commitment transaction failed/was refused:"); - log_error!(logger, " {}", e); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Providing latest holder commitment transaction failed/was refused:"); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), " {}", e); ret = Err(()); } } ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_per_commitment_point } => { - log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info"); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Updating ChannelMonitor with latest counterparty commitment transaction info"); self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_per_commitment_point, logger) }, ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => { - log_trace!(logger, "Updating ChannelMonitor with payment preimage"); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Updating ChannelMonitor with payment preimage"); self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, &bounded_fee_estimator, logger) }, ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => { - log_trace!(logger, "Updating ChannelMonitor with commitment secret"); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Updating ChannelMonitor with commitment secret"); if let Err(e) = self.provide_secret(*idx, *secret) { - log_error!(logger, "Providing latest counterparty commitment secret failed/was refused:"); - log_error!(logger, " {}", e); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Providing latest counterparty commitment secret failed/was refused:"); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), " {}", e); ret = Err(()); } }, ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => { - log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast); + 
log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast); self.lockdown_from_offchain = true; if *should_broadcast { // There's no need to broadcast our commitment transaction if we've seen one @@ -2418,7 +2419,7 @@ impl ChannelMonitorImpl { _ => false, }).is_some(); if detected_funding_spend { - log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain"); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Avoiding commitment broadcast, already detected confirmed spend onchain"); continue; } self.broadcast_latest_holder_commitment_txn(broadcaster, logger); @@ -2442,18 +2443,18 @@ impl ChannelMonitorImpl { ); } } else if !self.holder_tx_signed { - log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); - log_error!(logger, " in channel monitor for channel {}!", log_bytes!(self.funding_info.0.to_channel_id())); - log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!"); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), " in channel monitor for channel {}!", log_bytes!(self.funding_info.0.to_channel_id())); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!"); } else { // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager // will still give us a ChannelForceClosed event with !should_broadcast, but we // shouldn't print the scary warning above. - log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction."); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Channel off-chain state closed after we broadcasted our latest commitment transaction."); } }, ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey } => { - log_trace!(logger, "Updating ChannelMonitor with shutdown script"); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Updating ChannelMonitor with shutdown script"); if let Some(shutdown_script) = self.shutdown_script.replace(scriptpubkey.clone()) { panic!("Attempted to replace shutdown script {} with {}", shutdown_script, scriptpubkey); } @@ -2472,7 +2473,7 @@ impl ChannelMonitorImpl { // Refuse updates after we've detected a spend onchain, but only if we haven't processed a // force closed monitor update yet. 
if ret.is_ok() && self.funding_spend_seen && self.latest_update_id != CLOSED_CHANNEL_UPDATE_ID { - log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) } else { ret } } @@ -2645,7 +2646,7 @@ impl ChannelMonitorImpl { // Last, track onchain revoked commitment transaction and fail backward outgoing HTLCs as payment path is broken if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours // We're definitely a counterparty commitment transaction! - log_error!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len()); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len()); for (idx, outp) in tx.output.iter().enumerate() { watch_outputs.push((idx as u32, outp.clone())); } @@ -2675,7 +2676,7 @@ impl ChannelMonitorImpl { } self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number); - log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid); fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, tx, height, block_hash, per_commitment_data.iter().map(|(htlc, htlc_source)| (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref())) @@ -2796,7 +2797,7 @@ impl ChannelMonitorImpl { // have a corresponding output at the same index within the transaction. 
for (idx, input) in tx.input.iter().enumerate() { if input.previous_output.txid == *commitment_txid && input.witness.len() == 5 && tx.output.get(idx).is_some() { - log_error!(logger, "Got broadcast of revoked counterparty HTLC transaction, spending {}:{}", htlc_txid, idx); + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Got broadcast of revoked counterparty HTLC transaction, spending {}:{}", htlc_txid, idx); let revk_outp = RevokedOutput::build( per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, @@ -2890,7 +2891,7 @@ impl ChannelMonitorImpl { if self.current_holder_commitment_tx.txid == commitment_txid { is_holder_tx = true; - log_info!(logger, "Got broadcast of latest holder commitment tx {}, searching for available HTLCs to claim", commitment_txid); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Got broadcast of latest holder commitment tx {}, searching for available HTLCs to claim", commitment_txid); let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height); let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx); append_onchain_update!(res, to_watch); @@ -2900,7 +2901,7 @@ impl ChannelMonitorImpl { } else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx { if holder_tx.txid == commitment_txid { is_holder_tx = true; - log_info!(logger, "Got broadcast of previous holder commitment tx {}, searching for available HTLCs to claim", commitment_txid); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Got broadcast of previous holder commitment tx {}, searching for available HTLCs to claim", commitment_txid); let res = self.get_broadcasted_holder_claims(holder_tx, height); let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx); append_onchain_update!(res, to_watch); @@ -2918,7 +2919,7 @@ impl ChannelMonitorImpl { } pub fn get_latest_holder_commitment_txn(&mut self, logger: &L) -> Vec where L::Target: Logger { - log_debug!(logger, "Getting signed latest holder commitment transaction!"); + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Getting signed latest holder commitment transaction!"); self.holder_tx_signed = true; let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); let txid = commitment_tx.txid(); @@ -2957,7 +2958,7 @@ impl ChannelMonitorImpl { #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))] /// Note that this includes possibly-locktimed-in-the-future transactions! fn unsafe_get_latest_holder_commitment_txn(&mut self, logger: &L) -> Vec where L::Target: Logger { - log_debug!(logger, "Getting signed copy of latest holder commitment transaction!"); + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Getting signed copy of latest holder commitment transaction!"); let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript); let txid = commitment_tx.txid(); let mut holder_transactions = vec![commitment_tx]; @@ -3053,7 +3054,7 @@ impl ChannelMonitorImpl { let txid = tx.txid(); // If a transaction has already been confirmed, ensure we don't bother processing it duplicatively. 
if Some(txid) == self.funding_spend_confirmed { - log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid); + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid); continue 'tx_iter; } for ev in self.onchain_events_awaiting_threshold_conf.iter() { @@ -3063,19 +3064,19 @@ impl ChannelMonitorImpl { "Transaction {} was already confirmed and is being re-confirmed in a different block.\n\ This indicates a severe bug in the transaction connection logic - a reorg should have been processed first!", ev.txid); } - log_debug!(logger, "Skipping redundant processing of confirming tx {} as it was previously confirmed", txid); + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Skipping redundant processing of confirming tx {} as it was previously confirmed", txid); continue 'tx_iter; } } for htlc in self.htlcs_resolved_on_chain.iter() { if Some(txid) == htlc.resolving_txid { - log_debug!(logger, "Skipping redundant processing of HTLC resolution tx {} as it was previously confirmed", txid); + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Skipping redundant processing of HTLC resolution tx {} as it was previously confirmed", txid); continue 'tx_iter; } } for spendable_txid in self.spendable_txids_confirmed.iter() { if txid == *spendable_txid { - log_debug!(logger, "Skipping redundant processing of spendable tx {} as it was previously confirmed", txid); + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Skipping redundant processing of spendable tx {} as it was previously confirmed", txid); continue 'tx_iter; } } @@ -3088,7 +3089,7 @@ impl ChannelMonitorImpl { let prevout = &tx.input[0].previous_output; if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 { let mut balance_spendable_csv = None; - log_info!(logger, "Channel {} closed by funding output spend in txid {}.", + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Channel {} closed by funding output spend in txid {}.", log_bytes!(self.funding_info.0.to_channel_id()), txid); self.funding_spend_seen = true; let mut commitment_tx_to_counterparty_output = None; @@ -3182,7 +3183,7 @@ impl ChannelMonitorImpl { F::Target: FeeEstimator, L::Target: Logger, { - log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height); debug_assert!(self.best_block.height() >= conf_height); let should_broadcast = self.should_broadcast_holder_commitment_txn(logger); @@ -3253,7 +3254,7 @@ impl ChannelMonitorImpl { matured_htlcs.push(source.clone()); } - log_debug!(logger, "HTLC {} failure update in {} has got enough confirmations to be passed upstream", + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "HTLC {} failure update in {} has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0), entry.txid); self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate { payment_hash, @@ -3269,7 +3270,7 @@ impl ChannelMonitorImpl { }); }, OnchainEvent::MaturingOutput { descriptor } => { - 
log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); + log_debug!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor] }); @@ -3321,7 +3322,7 @@ impl ChannelMonitorImpl { F::Target: FeeEstimator, L::Target: Logger, { - log_trace!(logger, "Block {} at height {} disconnected", header.block_hash(), height); + log_trace!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Block {} at height {} disconnected", header.block_hash(), height); //We may discard: //- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected @@ -3346,6 +3347,7 @@ impl ChannelMonitorImpl { L::Target: Logger, { let mut removed_height = None; + let chan_id = self.funding_info.0.to_channel_id(); for entry in self.onchain_events_awaiting_threshold_conf.iter() { if entry.txid == *txid { removed_height = Some(entry.height); @@ -3354,9 +3356,9 @@ impl ChannelMonitorImpl { } if let Some(removed_height) = removed_height { - log_info!(logger, "transaction_unconfirmed of txid {} implies height {} was reorg'd out", txid, removed_height); + log_info!(logger, None, Some(chan_id), "transaction_unconfirmed of txid {} implies height {} was reorg'd out", txid, removed_height); self.onchain_events_awaiting_threshold_conf.retain(|ref entry| if entry.height >= removed_height { - log_info!(logger, "Transaction {} reorg'd out", entry.txid); + log_info!(logger, None, Some(chan_id), "Transaction {} reorg'd out", entry.txid); false } else { true }); } @@ -3469,7 +3471,7 @@ impl ChannelMonitorImpl { let htlc_outbound = $holder_tx == htlc.offered; if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) || (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) { - log_info!(logger, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry); return true; } } @@ -3528,12 +3530,12 @@ impl ChannelMonitorImpl { revocation_sig_claim as u8, 1); if ($holder_tx && revocation_sig_claim) || (outbound_htlc && !$source_avail && (accepted_preimage_claim || offered_preimage_claim)) { - log_error!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!", + log_error!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!", $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(), if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0), if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back. 
We can likely claim the HTLC output with a revocation claim" }); } else { - log_info!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}", + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}", $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(), if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0), if revocation_sig_claim { "revocation sig" } else if accepted_preimage_claim || offered_preimage_claim { "preimage" } else { "timeout" }); @@ -3682,7 +3684,7 @@ impl ChannelMonitorImpl { commitment_tx_output_idx: Some(input.previous_output.vout), }, }; - log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold()); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold()); self.onchain_events_awaiting_threshold_conf.push(entry); } } @@ -3753,7 +3755,7 @@ impl ChannelMonitorImpl { block_hash: Some(*block_hash), event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() }, }; - log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold()); + log_info!(logger, self.counterparty_node_id, Some(self.funding_info.0.to_channel_id()), "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold()); self.onchain_events_awaiting_threshold_conf.push(entry); } } diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 45968c57e53..5b9ef19369f 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -50,6 +50,8 @@ use core::mem::replace; use core::mem::swap; use bitcoin::hashes::Hash; +use crate::util::macro_logger::default_logging::*; + const MAX_ALLOC_SIZE: usize = 64*1024; /// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 4604a164cd6..3f34181c951 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -43,6 +43,8 @@ use bitcoin::{PackedLockTime, Sequence, Witness}; use super::chaininterface::LowerBoundedFeeEstimator; +use crate::util::macro_logger::default_logging::*; + const MAX_ALLOC_SIZE: usize = 64*1024; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 99b3c169114..9eab459330c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -51,6 +51,7 @@ use core::ops::Deref; #[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; use bitcoin::hashes::hex::ToHex; +use crate::util::macro_logger::context_logging::*; #[cfg(test)] pub struct ChannelValueStat { @@ -1195,7 +1196,7 @@ impl Channel { if feerate_per_kw + 250 < lower_limit { if let Some(cur_feerate) = cur_feerate_per_kw { if feerate_per_kw > cur_feerate { - log_warn!(logger, + log_warn!(logger, None, None, "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. 
Had {} s/kW, now {} s/kW.", cur_feerate, feerate_per_kw); return Ok(()); @@ -1340,7 +1341,7 @@ impl Channel { return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat))); } if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", + log_debug!(logger, None, None, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); } if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis { @@ -1575,7 +1576,7 @@ impl Channel { } } - log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", + log_trace!(logger, None, Some(self.channel_id), "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); @@ -1602,10 +1603,10 @@ impl Channel { feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000 }; if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, None, Some(self.channel_id), " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); included_non_dust_htlcs.push((htlc_in_tx, $source)); } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, None, Some(self.channel_id), " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); included_dust_htlcs.push((htlc_in_tx, $source)); } } else { @@ -1616,10 +1617,10 @@ impl Channel { feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000 }; if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, None, Some(self.channel_id), " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); 
included_non_dust_htlcs.push((htlc_in_tx, $source)); } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, None, Some(self.channel_id), " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); included_dust_htlcs.push((htlc_in_tx, $source)); } } @@ -1639,7 +1640,7 @@ impl Channel { add_htlc_output!(htlc, false, None, state_name); remote_htlc_total_msat += htlc.amount_msat; } else { - log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + log_trace!(logger, None, Some(self.channel_id), " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); match &htlc.state { &InboundHTLCState::LocalRemoved(ref reason) => { if generated_by_local { @@ -1679,7 +1680,7 @@ impl Channel { add_htlc_output!(htlc, true, Some(&htlc.source), state_name); local_htlc_total_msat += htlc.amount_msat; } else { - log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + log_trace!(logger, None, Some(self.channel_id), " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); match htlc.state { OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => { value_to_self_msat_offset -= htlc.amount_msat as i64; @@ -1735,13 +1736,13 @@ impl Channel { }; if value_to_a >= (broadcaster_dust_limit_satoshis as i64) { - log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a); + log_trace!(logger, None, Some(self.channel_id), " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a); } else { value_to_a = 0; } if value_to_b >= (broadcaster_dust_limit_satoshis as i64) { - log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b); + log_trace!(logger, None, Some(self.channel_id), " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b); } else { value_to_b = 0; } @@ -1941,7 +1942,7 @@ impl Channel { InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill(_) = reason { } else { - log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id())); + log_warn!(logger, None, Some(self.channel_id), "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id())); debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); } return UpdateFulfillFetch::DuplicateClaim {}; @@ -1994,7 +1995,7 @@ impl Channel { }, &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. 
} => { if htlc_id_arg == htlc_id { - log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id())); + log_warn!(logger, None, Some(self.channel_id), "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id())); // TODO: We may actually be able to switch to a fulfill here, though its // rare enough it may not be worth the complexity burden. debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); @@ -2004,7 +2005,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.channel_state); + log_trace!(logger, None, Some(self.channel_id), "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.channel_state); self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); @@ -2022,7 +2023,7 @@ impl Channel { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); + log_trace!(logger, None, Some(self.channel_id), "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); } @@ -2174,7 +2175,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); + log_trace!(logger, None, Some(self.channel_id), "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { htlc_id: htlc_id_arg, err_packet, @@ -2182,7 +2183,7 @@ impl Channel { return Ok(None); } - log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); + log_trace!(logger, None, Some(self.channel_id), "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); { let htlc = &mut self.pending_inbound_htlcs[pending_idx]; htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); @@ -2336,7 +2337,7 @@ impl Channel { let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); // They sign the holder commitment transaction... 
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", + log_trace!(logger, None, Some(self.channel_id), "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); @@ -2348,7 +2349,7 @@ impl Channel { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_trace!(logger, None, Some(self.channel_id), "Initial counterparty tx for channel {} is: txid {} tx {}", log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx) @@ -2440,7 +2441,7 @@ impl Channel { self.cur_counterparty_commitment_transaction_number -= 1; self.cur_holder_commitment_transaction_number -= 1; - log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, None, Some(self.channel_id), "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id())); let need_channel_ready = self.check_get_channel_ready(0).is_some(); self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); @@ -2481,7 +2482,7 @@ impl Channel { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_trace!(logger, None, Some(self.channel_id), "Initial counterparty tx for channel {} is: txid {} tx {}", log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number); @@ -2530,7 +2531,7 @@ impl Channel { self.cur_holder_commitment_transaction_number -= 1; self.cur_counterparty_commitment_transaction_number -= 1; - log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, None, Some(self.channel_id), "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id())); let need_channel_ready = self.check_get_channel_ready(0).is_some(); self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); @@ -2604,7 +2605,7 @@ impl Channel { self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point; self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, None, Some(self.channel_id), "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id())); Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) } @@ -2997,7 +2998,7 @@ impl Channel { if 
msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + log_info!(logger, None, Some(self.channel_id), "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } @@ -3007,7 +3008,7 @@ impl Channel { if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", + log_info!(logger, None, Some(self.channel_id), "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } @@ -3049,7 +3050,7 @@ impl Channel { if pending_remote_value_msat - msg.amount_msat - self.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id())); + log_info!(logger, None, Some(self.channel_id), "Attempting to fail HTLC due to fee spike buffer violation in channel {}. 
Rebalancing is required.", log_bytes!(self.channel_id())); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } } else { @@ -3174,7 +3175,7 @@ impl Channel { let bitcoin_tx = trusted_tx.built_transaction(); let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); - log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", + log_trace!(logger, None, Some(self.channel_id), "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); @@ -3245,7 +3246,7 @@ impl Channel { let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys); let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); - log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", + log_trace!(logger, None, Some(self.channel_id), "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id())); if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { @@ -3290,7 +3291,7 @@ impl Channel { Some(forward_info.clone()) } else { None }; if let Some(forward_info) = new_forward { - log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", + log_trace!(logger, None, Some(self.channel_id), "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); need_commitment = true; @@ -3299,7 +3300,7 @@ impl Channel { let mut claimed_htlcs = Vec::new(); for htlc in self.pending_outbound_htlcs.iter_mut() { if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state { - log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", + log_trace!(logger, None, Some(self.channel_id), "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); // Grab the preimage, if it exists, instead of cloning let mut reason = OutboundHTLCOutcome::Success(None); @@ -3349,7 +3350,7 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); } - log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", + log_debug!(logger, None, Some(self.channel_id), "Received valid commitment_signed from 
peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id)); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -3366,7 +3367,7 @@ impl Channel { true } else { false }; - log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", + log_debug!(logger, None, Some(self.channel_id), "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); return Ok(self.push_ret_blockable_mon_update(monitor_update)); @@ -3387,7 +3388,7 @@ impl Channel { fn free_holding_cell_htlcs(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger { assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() { - log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(), + log_trace!(logger, None, Some(self.channel_id), "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(), if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id())); let mut monitor_update = ChannelMonitorUpdate { @@ -3414,7 +3415,7 @@ impl Channel { Err(e) => { match e { ChannelError::Ignore(ref msg) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", + log_info!(logger, None, Some(self.channel_id), "Failed to send HTLC with payment_hash {} due to {} in channel {}", log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id())); // If we fail to send here, then this HTLC should // be failed backwards. Failing to send here @@ -3479,7 +3480,7 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", + log_debug!(logger, None, Some(self.channel_id), "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len()); @@ -3563,7 +3564,7 @@ impl Channel { self.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } - log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id())); + log_trace!(logger, None, Some(self.channel_id), "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id())); let mut to_forward_infos = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); @@ -3576,11 +3577,12 @@ impl Channel { // Take references explicitly so that we can hold multiple references to self. 
let pending_inbound_htlcs: &mut Vec<_> = &mut self.pending_inbound_htlcs; let pending_outbound_htlcs: &mut Vec<_> = &mut self.pending_outbound_htlcs; + let chan_id = self.channel_id; // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug) pending_inbound_htlcs.retain(|htlc| { if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { - log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(chan_id), " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0)); if let &InboundHTLCRemovalReason::Fulfill(_) = reason { value_to_self_msat_diff += htlc.amount_msat as i64; } @@ -3589,7 +3591,7 @@ impl Channel { }); pending_outbound_htlcs.retain(|htlc| { if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state { - log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(chan_id), " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0)); if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :( revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); } else { @@ -3611,13 +3613,13 @@ impl Channel { mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(self.channel_id), " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); require_commitment = true; } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { match forward_info { PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(self.channel_id), " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0)); require_commitment = true; match fail_msg { HTLCFailureMsg::Relay(msg) => { @@ -3631,7 +3633,7 @@ impl Channel { } }, PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(self.channel_id), " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0)); to_forward_infos.push((forward_info, htlc.htlc_id)); htlc.state = InboundHTLCState::Committed; } @@ -3641,11 +3643,11 @@ impl Channel { } for htlc in pending_outbound_htlcs.iter_mut() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { - log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(self.channel_id), " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0)); htlc.state = OutboundHTLCState::Committed; } if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { - log_trace!(logger, " ...promoting outbound 
AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(self.channel_id), " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); // Grab the preimage, if it exists, instead of cloning let mut reason = OutboundHTLCOutcome::Success(None); mem::swap(outcome, &mut reason); @@ -3660,14 +3662,14 @@ impl Channel { match update_state { FeeUpdateState::Outbound => { debug_assert!(self.is_outbound()); - log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate); + log_trace!(logger, None, Some(self.channel_id), " ...promoting outbound fee update {} to Committed", feerate); self.feerate_per_kw = feerate; self.pending_update_fee = None; }, FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); }, FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); + log_trace!(logger, None, Some(self.channel_id), " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); require_commitment = true; self.feerate_per_kw = feerate; self.pending_update_fee = None; @@ -3692,7 +3694,7 @@ impl Channel { self.monitor_pending_forwards.append(&mut to_forward_infos); self.monitor_pending_failures.append(&mut revoked_htlcs); self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); - log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); + log_debug!(logger, None, Some(self.channel_id), "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update))); } @@ -3716,12 +3718,12 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", + log_debug!(logger, None, Some(self.channel_id), "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) } else { - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); + log_debug!(logger, None, Some(self.channel_id), "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) } @@ -3764,7 +3766,7 @@ impl Channel { let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? 
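The `+ let chan_id = self.channel_id;` line in the revoke_and_ack hunk above is there because the `retain` closures may not capture `&self` while `pending_inbound_htlcs`/`pending_outbound_htlcs` are mutably borrowed; copying the `Copy` field out first sidesteps that. A standalone sketch of the idiom, with invented types:

```rust
struct Channel {
    channel_id: [u8; 32],
    pending_inbound_htlcs: Vec<u64>, // amounts in msat; stands in for real HTLC state
}

impl Channel {
    fn prune_removed(&mut self) {
        // Take the Vec mutably, then copy the `Copy` field out of `self` so the
        // closure captures `chan_id` by value rather than borrowing `self`.
        let pending: &mut Vec<u64> = &mut self.pending_inbound_htlcs;
        let chan_id = self.channel_id;
        pending.retain(|amt_msat| {
            if *amt_msat == 0 {
                println!(" ...removing HTLC on channel {:02x?}", chan_id);
                false
            } else { true }
        });
    }
}

fn main() {
    let mut chan = Channel { channel_id: [0x42; 32], pending_inbound_htlcs: vec![0, 1_000, 0] };
    chan.prune_removed();
    assert_eq!(chan.pending_inbound_htlcs, vec![1_000]);
}
```

The non-closure `for ... in iter_mut()` loops later in the same hunk can keep using `Some(self.channel_id)` directly, since no second borrow of `self` is live there.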
- log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); + log_debug!(logger, None, Some(self.channel_id), "Cannot afford to send new feerate at {}", feerate_per_kw); return None; } @@ -3772,11 +3774,11 @@ impl Channel { let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { - log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); + log_debug!(logger, None, Some(self.channel_id), "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { - log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); + log_debug!(logger, None, Some(self.channel_id), "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } @@ -3873,7 +3875,7 @@ impl Channel { self.sent_message_awaiting_response = None; self.channel_state |= ChannelState::PeerDisconnected as u32; - log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); + log_trace!(logger, None, Some(self.channel_id), "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); } /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted. @@ -3981,7 +3983,7 @@ impl Channel { self.monitor_pending_revoke_and_ack = false; self.monitor_pending_commitment_signed = false; let order = self.resend_order.clone(); - log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", + log_debug!(logger, None, Some(self.channel_id), "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); @@ -4091,7 +4093,7 @@ impl Channel { }) } else { None }; - log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", + log_trace!(logger, None, Some(self.channel_id), "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); msgs::CommitmentUpdate { @@ -4137,7 +4139,7 @@ impl Channel { if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { macro_rules! 
log_and_panic { ($err_msg: expr) => { - log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); + log_error!(logger, None, Some(self.channel_id), $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); } } @@ -4243,9 +4245,9 @@ impl Channel { if msg.next_local_commitment_number == next_counterparty_commitment_number { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id())); + log_debug!(logger, None, Some(self.channel_id), "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id())); } else { - log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id())); + log_debug!(logger, None, Some(self.channel_id), "Reconnected channel {} with no loss", log_bytes!(self.channel_id())); } Ok(ReestablishResponses { @@ -4256,9 +4258,9 @@ impl Channel { }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id())); + log_debug!(logger, None, Some(self.channel_id), "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id())); } else { - log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id())); + log_debug!(logger, None, Some(self.channel_id), "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id())); } if self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { @@ -4379,7 +4381,7 @@ impl Channel { assert!(self.shutdown_scriptpubkey.is_some()); let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false); - log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", + log_trace!(logger, None, Some(self.channel_id), "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", our_min_fee, our_max_fee, total_fee_satoshis); let sig = self.holder_signer @@ -5313,14 +5315,14 @@ impl Channel { // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). 
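The `log_and_panic!` helper in the reestablish hunk above reuses one format string for both the log line and the panic message, so the two can never drift apart. A hedged sketch of that shape (an `eprintln!`-backed closure stands in for the logger):

```rust
// Sketch only: reuse a single format string for both the log and the panic.
macro_rules! log_and_panic {
    ($log:expr, $fmt:expr $(, $arg:expr)* $(,)?) => {{
        // Log first so the message is persisted even if panic output is lost.
        $log(format!($fmt $(, $arg)*));
        panic!($fmt $(, $arg)*);
    }};
}

fn main() {
    let log = |msg: String| eprintln!("ERROR: {}", msg);
    let channel_id = "deadbeef";
    // This intentionally aborts the process after logging.
    log_and_panic!(log, "We have fallen behind on channel {}", channel_id);
}
```

Passing the format string as a `$fmt:expr` metavariable works here because the invocation supplies a string literal, which the built-in format machinery accepts through macro expansion.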
if let Some(channel_ready) = self.check_get_channel_ready(height) { - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id)); + log_info!(logger, None, Some(self.channel_id), "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id)); let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger); return Ok((Some(channel_ready), announcement_sigs)); } } for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { - log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id())); + log_info!(logger, None, Some(self.channel_id), "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id())); return Err(ClosureReason::CommitmentTxConfirmed); } } @@ -5382,7 +5384,7 @@ impl Channel { let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) } else { None }; - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id)); + log_info!(logger, None, Some(self.channel_id), "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id)); return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); } @@ -5413,7 +5415,7 @@ impl Channel { } } else if !self.is_outbound() && self.funding_tx_confirmed_in.is_none() && height >= self.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { - log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id)); + log_info!(logger, None, Some(self.channel_id), "Closing channel {} due to funding timeout", log_bytes!(self.channel_id)); // If funding_tx_confirmed_in is unset, the channel must not be active assert!(non_shutdown_state <= ChannelState::ChannelReady as u32); assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0); @@ -5609,7 +5611,7 @@ impl Channel { let signature = match self.get_outbound_funding_created_signature(logger) { Ok(res) => res, Err(e) => { - log_error!(logger, "Got bad signatures: {:?}!", e); + log_error!(logger, None, Some(self.channel_id), "Got bad signatures: {:?}!", e); self.channel_transaction_parameters.funding_outpoint = None; return Err(e); } @@ -5690,7 +5692,7 @@ impl Channel { } if self.channel_state & ChannelState::PeerDisconnected as u32 != 0 { - log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected"); + log_trace!(logger, None, Some(self.channel_id), "Cannot create an announcement_signatures as our peer is disconnected"); return None; } @@ -5698,24 +5700,24 @@ impl Channel { return None; } - log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.channel_id())); + log_trace!(logger, None, Some(self.channel_id), "Creating an announcement_signatures message for channel {}", log_bytes!(self.channel_id())); let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) { Ok(a) => a, Err(e) => { - log_trace!(logger, "{:?}", e); + log_trace!(logger, None, Some(self.channel_id), "{:?}", e); return None; } }; let our_node_sig = match 
node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) { Err(_) => { - log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!"); + log_error!(logger, None, Some(self.channel_id), "Failed to generate node signature for channel_announcement. Channel will not be announced!"); return None; }, Ok(v) => v }; let our_bitcoin_sig = match self.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.secp_ctx) { Err(_) => { - log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!"); + log_error!(logger, None, Some(self.channel_id), "Signer rejected channel_announcement signing. Channel will not be announced!"); return None; }, Ok(v) => v @@ -5820,10 +5822,10 @@ impl Channel { let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id())); + log_trace!(logger, None, Some(self.channel_id), "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id())); remote_last_secret } else { - log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, None, Some(self.channel_id), "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id())); [0;32] }; self.mark_awaiting_response(); @@ -6029,7 +6031,7 @@ impl Channel { } fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger { - log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed..."); + log_trace!(logger, None, Some(self.channel_id), "Updating HTLC state for a newly-sent commitment_signed..."); // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we // fail to generate this, we still are at least at a position where upgrading their status // is acceptable. 
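In the announcement_signatures hunks above, the channel is only announced if both the node-key and the funding-key signatures succeed; either failure logs an error and returns `None`. Schematically (signers and types invented for illustration):

```rust
// Stand-in signers; the real code signs with the node key and the funding key.
fn sign_with_node_key(_msg: &[u8]) -> Result<[u8; 64], ()> { Ok([1u8; 64]) }
fn sign_with_funding_key(_msg: &[u8]) -> Result<[u8; 64], ()> { Ok([2u8; 64]) }

fn announcement_sigs(msg: &[u8]) -> Option<([u8; 64], [u8; 64])> {
    let our_node_sig = match sign_with_node_key(msg) {
        Err(_) => {
            eprintln!("Failed to generate node signature. Channel will not be announced!");
            return None;
        },
        Ok(v) => v,
    };
    let our_bitcoin_sig = match sign_with_funding_key(msg) {
        Err(_) => {
            eprintln!("Signer rejected channel_announcement signing. Channel will not be announced!");
            return None;
        },
        Ok(v) => v,
    };
    Some((our_node_sig, our_bitcoin_sig))
}

fn main() {
    assert!(announcement_sigs(b"unsigned channel_announcement").is_some());
}
```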
@@ -6038,13 +6040,13 @@ impl Channel { Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) } else { None }; if let Some(state) = new_state { - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(self.channel_id), " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); htlc.state = state; } } for htlc in self.pending_outbound_htlcs.iter_mut() { if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { - log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, None, Some(self.channel_id), " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); // Grab the preimage, if it exists, instead of cloning let mut reason = OutboundHTLCOutcome::Success(None); mem::swap(outcome, &mut reason); @@ -6054,7 +6056,7 @@ impl Channel { if let Some((feerate, update_state)) = self.pending_update_fee { if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { debug_assert!(!self.is_outbound()); - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); + log_trace!(logger, None, Some(self.channel_id), " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); self.feerate_per_kw = feerate; self.pending_update_fee = None; } @@ -6132,13 +6134,13 @@ impl Channel { signature = res.0; htlc_signatures = res.1; - log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", + log_trace!(logger, None, Some(self.channel_id), "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()), log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id())); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { - log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", + log_trace!(logger, None, Some(self.channel_id), "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), @@ -7805,7 +7807,7 @@ mod tests { let redeemscript = chan.get_funding_redeemscript(); let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap(); let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.channel_value_satoshis); - log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction))); + log_trace!(logger, None, Some(chan.channel_id), "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction))); assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, 
chan.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig"); let mut per_htlc: Vec<(HTLCOutputInCommitment, Option)> = Vec::new(); @@ -7837,7 +7839,7 @@ mod tests { let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate()); $({ - log_trace!(logger, "verifying htlc {}", $htlc_idx); + log_trace!(logger, Some(chan.counterparty_node_id), Some(chan.channel_id), "verifying htlc {}", $htlc_idx); let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap(); let ref htlc = htlcs[$htlc_idx]; @@ -7870,7 +7872,7 @@ mod tests { let index = (htlc_sig.1).0; let channel_parameters = chan.channel_transaction_parameters.as_holder_broadcastable(); let trusted_tx = holder_commitment_tx.trust(); - log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage)))); + log_trace!(logger, Some(chan.counterparty_node_id), Some(chan.channel_id), "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage)))); assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx"); })* diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 398f395492f..57d37845b0a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -80,6 +80,8 @@ use core::ops::Deref; pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure, RecipientOnionFields}; use crate::ln::script::ShutdownScript; +use crate::util::macro_logger::context_logging::*; + // We hold various information about HTLC relay in the HTLC objects in Channel itself: // // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should @@ -1579,8 +1581,7 @@ macro_rules! handle_error { }, None)); } } - - log_error!($self.logger, "{}", err.err); + log_error!($self.logger, None, None, "{}", err.err); if let msgs::ErrorAction::IgnoreError = err.action { } else { msg_events.push(events::MessageSendEvent::HandleError { @@ -1635,7 +1636,7 @@ macro_rules! convert_chan_err { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, ChannelError::Close(msg) => { - log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, None, Some($channel_id.clone()), "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, $channel); let shutdown_res = $channel.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), @@ -1794,12 +1795,12 @@ macro_rules! 
handle_new_monitor_update { } match $update_res { ChannelMonitorUpdateStatus::InProgress => { - log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", + log_debug!($self.logger, None, Some($chan.channel_id()), "ChannelMonitor update for {} in flight, holding messages until the update completes.", log_bytes!($chan.channel_id()[..])); Ok(()) }, ChannelMonitorUpdateStatus::PermanentFailure => { - log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", + log_error!($self.logger, None, Some($chan.channel_id()), "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan.channel_id()[..])); update_maps_on_chan_removal!($self, $chan); let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown( @@ -2298,7 +2299,7 @@ where #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; - log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); + log_debug!(self.logger, None, None, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let reason = HTLCFailReason::from_failure_code(0x4000 | 8); @@ -2335,7 +2336,7 @@ where return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); } }; - log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + log_error!(self.logger, Some(*peer_node_id), Some(*channel_id), "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(broadcast)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut peer_state = peer_state_mutex.lock().unwrap(); @@ -2506,7 +2507,7 @@ where macro_rules! return_malformed_err { ($msg: expr, $err_code: expr) => { { - log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); + log_info!(self.logger, None, Some(msg.channel_id), "Failed to accept/forward incoming HTLC: {}", $msg); return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, @@ -2537,7 +2538,7 @@ where macro_rules! 
return_err { ($msg: expr, $err_code: expr, $data: expr) => { { - log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); + log_info!(self.logger, None, Some(msg.channel_id), "Failed to accept/forward incoming HTLC: {}", $msg); return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, @@ -2764,7 +2765,7 @@ where if chan.get_short_channel_id().is_none() { return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}); } - log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id())); + log_trace!(self.logger, None, Some(chan.channel_id()), "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id())); self.get_channel_update_for_unicast(chan) } @@ -2780,7 +2781,7 @@ where /// [`channel_update`]: msgs::ChannelUpdate /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result { - log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); + log_trace!(self.logger, None, Some(chan.channel_id()), "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, @@ -2789,7 +2790,7 @@ where self.get_channel_update_for_onion(short_channel_id, chan) } fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result { - log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id())); + log_trace!(self.logger, None, Some(chan.channel_id()), "Generating channel update for channel {}", log_bytes!(chan.channel_id())); let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..]; let enabled = chan.is_usable() && match chan.channel_update_status() { @@ -2832,8 +2833,7 @@ where fn send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option, session_priv_bytes: [u8; 32]) -> Result<(), APIError> { // The top-level caller should hold the total_consistency_lock read lock. debug_assert!(self.total_consistency_lock.try_write().is_err()); - - log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.hops.first().unwrap().short_channel_id); + log_trace!(self.logger, Some(path.hops.first().unwrap().pubkey), None, "Attempting to send payment for path with next hop {}", path.hops.first().unwrap().short_channel_id); let prng_seed = self.entropy_source.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); @@ -3421,7 +3421,7 @@ where }) => { macro_rules! 
failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { - log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); + log_info!(self.logger, None, None, "Failed to accept/forward incoming HTLC: {}", $msg); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, @@ -3532,7 +3532,7 @@ where routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _, }, }) => { - log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); + log_trace!(self.logger, None, Some(chan.key().clone()), "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, outpoint: prev_funding_outpoint, @@ -3546,7 +3546,7 @@ where onion_packet, &self.logger) { if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); + log_trace!(self.logger, None, Some(chan.key().clone()), "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); } else { panic!("Stated return value requirements in send_htlc() were not met"); } @@ -3562,12 +3562,12 @@ where panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); }, HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { - log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); + log_trace!(self.logger, None, Some(chan.key().clone()), "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); if let Err(e) = chan.get_mut().queue_fail_htlc( htlc_id, err_packet, &self.logger ) { if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); + log_trace!(self.logger, None, Some(chan.key().clone()), "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); } else { panic!("Stated return value requirements in queue_fail_htlc() were not met"); } @@ -3688,7 +3688,7 @@ where let ref mut htlcs = &mut claimable_payment.htlcs; if htlcs.len() == 1 { if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); + log_trace!(self.logger, None, None, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); } } @@ -3700,7 +3700,7 @@ where match &htlc.onion_payload { OnionPayload::Invoice { .. 
} => { if htlc.total_msat != $payment_data.total_msat { - log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", + log_trace!(self.logger, None, None, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat); total_value = msgs::MAX_VALUE_MSAT; } @@ -3714,7 +3714,7 @@ where if total_value >= msgs::MAX_VALUE_MSAT { fail_htlc!(claimable_htlc, payment_hash); } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat { - log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable", + log_trace!(self.logger, None, None, "Failing HTLC with payment_hash {} as payment is already claimable", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); } else if total_value >= $payment_data.total_msat { @@ -3764,14 +3764,14 @@ where let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { Ok(result) => result, Err(()) => { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0)); + log_trace!(self.logger, None, None, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); } }; if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64; if (cltv_expiry as u64) < expected_min_expiry_height { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", + log_trace!(self.logger, None, None, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", log_bytes!(payment_hash.0), cltv_expiry, expected_min_expiry_height); fail_htlc!(claimable_htlc, payment_hash); } @@ -3807,7 +3807,7 @@ where }, None)); }, hash_map::Entry::Occupied(_) => { - log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); + log_trace!(self.logger, None, None, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); } } @@ -3816,15 +3816,15 @@ where }, hash_map::Entry::Occupied(inbound_payment) => { if payment_data.is_none() { - log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); + log_trace!(self.logger, None, None, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); }; let payment_data = payment_data.unwrap(); if inbound_payment.get().payment_secret != payment_data.payment_secret { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); + log_trace!(self.logger, None, None, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); } else 
if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).", + log_trace!(self.logger, None, None, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).", log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); fail_htlc!(claimable_htlc, payment_hash); } else { @@ -3910,7 +3910,7 @@ where // preimage update, which we must ensure is durable! We currently don't, // however, ensure that. if res.is_err() { - log_error!(self.logger, + log_error!(self.logger, None, None, "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!"); } let _ = handle_error!(self, res, counterparty_node_id); @@ -3931,16 +3931,16 @@ where if !chan.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() { - log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", + log_trace!(self.logger, None, Some(*chan_id), "Channel {} does not qualify for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } if !chan.is_live() { - log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", + log_trace!(self.logger, None, Some(*chan_id), "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } - log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", + log_trace!(self.logger, None, Some(*chan_id), "Channel {} qualifies for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.logger); @@ -4055,7 +4055,7 @@ where chan.maybe_expire_prev_config(); if chan.should_disconnect_peer_awaiting_response() { - log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}", + log_debug!(self.logger, None, None, "Disconnecting peer {} due to not making any progress on channel {}", counterparty_node_id, log_bytes!(*chan_id)); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, @@ -4304,7 +4304,7 @@ where { self.push_pending_forwards_ev(); } }, HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => { - log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error); + log_trace!(self.logger, None, None, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error); let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret); let mut push_forward_ev = false; @@ -4374,7 +4374,7 @@ where }); if dup_purpose.is_some() { debug_assert!(false, "Shouldn't get a duplicate pending claim event 
ever"); - log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", + log_error!(self.logger, None, None, "Got a duplicate pending claimable event on payment hash {}! Please report this bug", log_bytes!(payment_hash.0)); } payment.htlcs @@ -4394,7 +4394,7 @@ where let per_peer_state = self.per_peer_state.read().unwrap(); for htlc in sources.iter() { if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) { - log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!"); + log_error!(self.logger, None, None, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!"); debug_assert!(false); valid_mpp = false; break; @@ -4402,7 +4402,7 @@ where prev_total_msat = Some(htlc.total_msat); if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received { - log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!"); + log_error!(self.logger, None, None, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!"); debug_assert!(false); valid_mpp = false; break; @@ -4413,7 +4413,7 @@ where // We don't currently support MPP for spontaneous payments, so just check // that there's one payment here and move on. if sources.len() != 1 { - log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!"); + log_error!(self.logger, None, None, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!"); debug_assert!(false); valid_mpp = false; break; @@ -4425,12 +4425,12 @@ where mem::drop(per_peer_state); if sources.is_empty() || expected_amt_msat.is_none() { self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); - log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); + log_info!(self.logger, None, None, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); return; } if claimable_amt_msat != expected_amt_msat.unwrap() { self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); - log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", + log_info!(self.logger, None, None, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", expected_amt_msat.unwrap(), claimable_amt_msat); return; } @@ -4443,7 +4443,7 @@ where if let msgs::ErrorAction::IgnoreError = err.err.action { // We got a temporary failure updating monitor, but will claim the // HTLC when the monitor updating is restored (or on chain). 
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); + log_error!(self.logger, None, None, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); } else { errs.push((pk, err)); } } } @@ -4494,7 +4494,7 @@ where if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res { if let Some(action) = completion_action(Some(htlc_value_msat)) { - log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}", + log_trace!(self.logger, Some(counterparty_node_id), Some(chan_id), "Tracking monitor update completion action for channel {}: {:?}", log_bytes!(chan_id), action); peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); } @@ -4506,7 +4506,7 @@ where // TODO: This is a *critical* error - we probably updated the outbound edge // of the HTLC's monitor with a preimage. We should retry this monitor // update over and over again until morale improves. - log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage); + log_error!(self.logger, Some(counterparty_node_id), Some(chan_id), "Failed to update channel monitor with preimage {:?}", payment_preimage); return Err((counterparty_node_id, e)); } } @@ -4528,7 +4528,7 @@ where // with a preimage we *must* somehow manage to propagate it to the upstream // channel, or we must have an ability to receive the same event and try // again on restart. - log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", + log_error!(self.logger, None, None, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, update_res); } // Note that we do process the completion action here. This totally could be a @@ -4614,7 +4614,7 @@ where pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option) -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> { - log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement", + log_trace!(self.logger, None, Some(channel.channel_id()), "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement", log_bytes!(channel.channel_id()), if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(), @@ -4668,7 +4668,7 @@ where } if let Some(tx) = funding_broadcastable { - log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid()); + log_info!(self.logger, None, None, "Broadcasting funding transaction with txid {}", tx.txid()); self.tx_broadcaster.broadcast_transactions(&[&tx]); } @@ -4708,7 +4708,7 @@ where hash_map::Entry::Vacant(_) => return, } }; - log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}", + log_trace!(self.logger, None, Some(funding_txo.to_channel_id()), "ChannelMonitor updated to {}. 
Current highest is {}", highest_applied_update_id, channel.get().get_latest_monitor_update_id()); if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { return; @@ -5080,7 +5080,7 @@ where let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer, self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan); if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); + log_trace!(self.logger, Some(*counterparty_node_id), Some(chan.get().channel_id()), "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, @@ -5091,7 +5091,7 @@ where // counterparty's announcement_signatures. Thus, we only bother to send a // channel_update here if the channel is not public, i.e. we're not sending an // announcement_signatures. - log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); + log_trace!(self.logger, Some(*counterparty_node_id), Some(*chan.key()), "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), @@ -5126,7 +5126,7 @@ where hash_map::Entry::Occupied(mut chan_entry) => { if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_info!(self.logger, Some(*counterparty_node_id), Some(msg.channel_id), "Received a shutdown message from our counterparty for channel {}{}.", log_bytes!(msg.channel_id), if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } @@ -5198,7 +5198,7 @@ where } }; if let Some(broadcast_tx) = tx { - log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx)); + log_info!(self.logger, Some(*counterparty_node_id), None, "Broadcasting {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); } if let Some(chan) = chan_option { @@ -5392,7 +5392,7 @@ where prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }); }, hash_map::Entry::Occupied(_) => { - log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); + log_info!(self.logger, None, None, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, outpoint: prev_funding_outpoint, @@ -5573,7 +5573,7 @@ where if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersist); } else { - log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id)); + log_debug!(self.logger, Some(*counterparty_node_id), Some(chan_id), "Received channel_update for channel {}.", log_bytes!(chan_id)); try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan); } }, @@ -5655,10 +5655,10 @@ where match monitor_event 
{ MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { - log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); + log_trace!(self.logger, None, None, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); } else { - log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); + log_trace!(self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()), "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; let reason = HTLCFailReason::from_failure_code(0x4000 | 8); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); @@ -5821,7 +5821,7 @@ where self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); - log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); + log_info!(self.logger, None, Some(chan.channel_id()), "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); update_maps_on_chan_removal!(self, chan); false @@ -6155,7 +6155,7 @@ where // Check that, while holding the peer lock, we don't have anything else // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete. - log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", + log_trace!(self.logger, None, None, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); break; } @@ -6163,7 +6163,7 @@ where if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) { debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() { - log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor", + log_debug!(self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()), "Unlocking monitor updating for channel {} and updating monitor", log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, monitor_update); let update_id = monitor_update.update_id; @@ -6178,12 +6178,12 @@ where continue; } } else { - log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update", + log_trace!(self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()), "Unlocked monitor updating for channel {} without monitors to update", log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); } } } else { - log_debug!(self.logger, + log_debug!(self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()), "Got a release post-RAA monitor update for peer {} but the channel is gone", log_pubkey!(counterparty_node_id)); } @@ -6360,7 +6360,7 @@ where // See the docs for `ChannelManagerReadArgs` for more. 
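The `MonitorEvent::HTLCEvent` arm above splits on whether the monitor recovered a preimage: with one, the HTLC is claimed upstream; without, it is failed backwards. Note that the new context arguments differ between the two branches, since only the failure path threads the counterparty/channel pair here. A tiny sketch of the branch, with invented types:

```rust
// Invented stand-in for the monitor's HTLC resolution event.
struct HtlcResolution {
    preimage: Option<[u8; 32]>,
    payment_hash: [u8; 32],
}

fn handle_htlc_event(ev: HtlcResolution) {
    match ev.preimage {
        Some(preimage) => {
            // The preimage lets us pull the funds from the upstream channel.
            println!("Claiming HTLC with preimage {:02x?} from our monitor", preimage);
        },
        None => {
            // No preimage: fail the HTLC backwards to the previous hop.
            println!("Failing HTLC with hash {:02x?} from our monitor", ev.payment_hash);
        },
    }
}

fn main() {
    handle_htlc_event(HtlcResolution { preimage: None, payment_hash: [0xab; 32] });
}
```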
let block_hash = header.block_hash(); - log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height); + log_trace!(self.logger, None, None, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height); let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist }); @@ -6380,7 +6380,7 @@ where // See the docs for `ChannelManagerReadArgs` for more. let block_hash = header.block_hash(); - log_trace!(self.logger, "New best block: {} at height {}", block_hash, height); + log_trace!(self.logger, None, None, "New best block: {} at height {}", block_hash, height); let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist }); @@ -6477,7 +6477,7 @@ where if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); if channel.is_usable() { - log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: channel.get_counterparty_node_id(), @@ -6485,7 +6485,7 @@ where }); } } else { - log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); } } @@ -6495,7 +6495,7 @@ where } if let Some(announcement_sigs) = announcement_sigs { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: channel.get_counterparty_node_id(), msg: announcement_sigs, @@ -6590,7 +6590,7 @@ where timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, HTLCFailReason::from_failure_code(0x2000 | 2), HTLCDestination::InvalidForward { requested_forward_scid })); - log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); + log_trace!(self.logger, None, None, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); false } else { true } }); @@ -6778,7 +6778,7 @@ where let mut failed_channels = Vec::new(); let mut per_peer_state = self.per_peer_state.write().unwrap(); let remove_peer = { - log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.", + log_debug!(self.logger, Some(*counterparty_node_id), None, "Marking channels with {} disconnected and generating channel_updates.", log_pubkey!(counterparty_node_id)); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = 
peer_state_mutex.lock().unwrap(); @@ -6852,7 +6852,7 @@ where fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> { if !init_msg.features.supports_static_remote_key() { - log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id)); + log_debug!(self.logger, Some(*counterparty_node_id), None, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id)); return Err(()); } @@ -6898,8 +6898,7 @@ where }, } } - - log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); + log_debug!(self.logger, Some(*counterparty_node_id), None, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { @@ -7941,22 +7940,22 @@ where if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() { // If the channel is ahead of the monitor, return InvalidValue: - log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id()); - log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); - log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); - log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Without the latest ChannelMonitor we cannot continue without risking funds."); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); return Err(DecodeError::InvalidValue); } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || 
channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() { // But if the channel is behind the monitor, close the channel: - log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); - log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); - log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "A ChannelManager is stale compared to the current ChannelMonitor!"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id()); let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true); if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { @@ -7983,14 +7982,14 @@ where // claim update ChannelMonitor updates were persisted prior to persisting // the ChannelMonitor update for the forward leg, so attempting to fail the // backwards leg of the HTLC will simply be rejected. - log_info!(args.logger, + log_info!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0)); failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id())); } } } else { - log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}", + log_info!(args.logger, None, None, "Successfully loaded channel {} at update_id {} against monitor at update id {}", log_bytes!(channel.channel_id()), channel.get_latest_monitor_update_id(), monitor.get_latest_update_id()); channel.complete_all_mon_updates_through(monitor.get_latest_update_id()); @@ -8023,18 +8022,18 @@ where reason: ClosureReason::DisconnectedPeer, }, None)); } else { - log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id())); - log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); - log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds."); - log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Missing ChannelMonitor for channel {} needed by ChannelManager.",
log_bytes!(channel.channel_id())); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Without the ChannelMonitor we cannot continue without risking funds."); + log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); return Err(DecodeError::InvalidValue); } } for (funding_txo, _) in args.channel_monitors.iter() { if !funding_txo_set.contains(funding_txo) { - log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed", + log_info!(args.logger, None, Some(funding_txo.to_channel_id()), "Queueing monitor update to ensure missing channel {} is force closed", log_bytes!(funding_txo.to_channel_id())); let monitor_update = ChannelMonitorUpdate { update_id: CLOSED_CHANNEL_UPDATE_ID, @@ -8113,7 +8112,7 @@ where for (_, chan) in peer_state.channel_by_id.iter() { for update in chan.uncompleted_unblocked_mon_updates() { if let Some(funding_txo) = chan.get_funding_txo() { - log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}", + log_trace!(args.logger, None, None, "Replaying ChannelMonitorUpdate {} for channel {}", update.update_id, log_bytes!(funding_txo.to_channel_id())); pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -8215,11 +8214,12 @@ where // We only rebuild the pending payments map if we were most recently serialized by // 0.0.102+ for (_, monitor) in args.channel_monitors.iter() { - if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { + let chan_id = monitor.get_funding_txo().0.to_channel_id(); + if id_to_peer.get(&chan_id).is_none() { for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() { if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. 
} = htlc_source { if path.hops.is_empty() { - log_error!(args.logger, "Got an empty path for a pending payment"); + log_error!(args.logger, None, Some(chan_id), "Got an empty path for a pending payment"); return Err(DecodeError::InvalidValue); } @@ -8229,7 +8229,7 @@ where match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) { hash_map::Entry::Occupied(mut entry) => { let newly_added = entry.get_mut().insert(session_priv_bytes, &path); - log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", + log_info!(args.logger, None, Some(chan_id), "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0)); }, hash_map::Entry::Vacant(entry) => { @@ -8248,7 +8248,7 @@ where total_msat: path_amt, starting_block_height: best_block_height, }); - log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", + log_info!(args.logger, None, Some(chan_id), "Added a pending payment for {} msat with payment hash {} for path with session priv {}", path_amt, log_bytes!(htlc.payment_hash.0), log_bytes!(session_priv_bytes)); } } @@ -8270,7 +8270,7 @@ where forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { - log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + log_info!(args.logger, None, Some(chan_id), "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); false } else { true } @@ -8280,7 +8280,7 @@ where }); pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { - log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + log_info!(args.logger, None, Some(chan_id), "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id())); pending_events_read.retain(|(event, _)| { if let Event::HTLCIntercepted { intercept_id: ev_id, .. 
} = event { @@ -8367,7 +8367,7 @@ where None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) { Ok((payment_preimage, _)) => payment_preimage, Err(()) => { - log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0)); + log_error!(args.logger, None, None, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0)); return Err(DecodeError::InvalidValue); } } @@ -8394,7 +8394,7 @@ where }; if let Some(network_pubkey) = received_network_pubkey { if network_pubkey != our_network_pubkey { - log_error!(args.logger, "Key that was generated does not match the existing key."); + log_error!(args.logger, None, None, "Key that was generated does not match the existing key."); return Err(DecodeError::InvalidValue); } } @@ -8415,14 +8415,14 @@ where } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) { // Note that in rare cases it's possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. - log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); + log_error!(args.logger, Some(*_peer_node_id), Some(*chan_id), "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } if chan.is_usable() { if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { // Note that in rare cases it's possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); + log_error!(args.logger, Some(*_peer_node_id), Some(*chan_id), "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } } @@ -8434,7 +8434,7 @@ where for (_, monitor) in args.channel_monitors.iter() { for (payment_hash, payment_preimage) in monitor.get_stored_preimages() { if let Some(payment) = claimable_payments.remove(&payment_hash) { - log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0)); + log_info!(args.logger, None, None, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0)); let mut claimable_amt_msat = 0; let mut receiver_node_id = Some(our_network_pubkey); let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret; @@ -8502,7 +8502,7 @@ where } peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions; } else { - log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id); + log_error!(args.logger, Some(node_id), None, "Got blocked actions without a per-peer-state for {}", node_id); return Err(DecodeError::InvalidValue); } } diff --git a/lightning/src/ln/inbound_payment.rs b/lightning/src/ln/inbound_payment.rs index 1b0fd1707b8..bbae95b83c0 100644 --- a/lightning/src/ln/inbound_payment.rs +++ b/lightning/src/ln/inbound_payment.rs @@ -26,6 +26,8 @@ use crate::util::logger::Logger; use core::convert::{TryFrom, TryInto}; use core::ops::Deref; +use crate::util::macro_logger::default_logging::*; + pub(crate) const IV_LEN: usize = 16; const METADATA_LEN: usize = 16; const METADATA_KEY_LEN: usize = 32; diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 3b62c856334..c64ec8fa439 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -33,6 +33,8 @@ use crate::io::{Cursor, Read}; use core::convert::{AsMut, TryInto}; use core::ops::Deref; +use crate::util::macro_logger::default_logging::*; + pub(crate) struct OnionKeys { #[cfg(test)] pub(crate) shared_secret: SharedSecret, diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index a13a618d83c..fd1a8b2bf91 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -32,6 +32,8 @@ use core::ops::Deref; use crate::prelude::*; use crate::sync::Mutex; +use crate::util::macro_logger::default_logging::*; + /// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102 /// and later, also stores information for retrying the payment. pub(crate) enum PendingOutboundPayment { diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 7d85d0ba1e5..6673df6242a 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -47,6 +47,8 @@ use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::sha256::HashEngine as Sha256Engine; use bitcoin::hashes::{HashEngine, Hash}; +use crate::util::macro_logger::default_logging::*; + /// A handler provided to [`PeerManager`] for reading and handling custom messages. 
/// /// [BOLT 1] specifies a custom message type range for use with experimental or application-specific diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 5171422cb89..f1149194847 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -32,6 +32,8 @@ use crate::io; use crate::sync::{Arc, Mutex}; use crate::prelude::*; +use crate::util::macro_logger::default_logging::*; + /// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be /// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending /// and receiving custom onion messages is supported. diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index dc6aeb9404e..013f2da54ca 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -50,6 +50,8 @@ use core::str::FromStr; #[cfg(feature = "std")] use std::time::{SystemTime, UNIX_EPOCH}; +use crate::util::macro_logger::default_logging::*; + /// We remove stale channel directional info two weeks after the last update, per BOLT 7's /// suggestion. const STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14; @@ -1585,7 +1587,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger { if msg.chain_hash != self.genesis_hash { return Err(LightningError { - err: "Channel announcement chain hash does not match genesis hash".to_owned(), + err: "Channel announcement chain hash does not match genesis hash".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Debug), }); } diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index ef6fef0a7eb..cff48c38837 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -32,6 +32,8 @@ use alloc::collections::BinaryHeap; use core::{cmp, fmt}; use core::ops::Deref; +use crate::util::macro_logger::default_logging::*; + /// A [`Router`] implemented using [`find_route`]. pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> where L::Target: Logger, diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 235b1a148a5..4f070c6e795 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -72,6 +72,8 @@ use core::time::Duration; use crate::io::{self, Read}; use crate::sync::{Mutex, MutexGuard}; +use crate::util::macro_logger::default_logging::*; + /// We define Score ever-so-slightly differently based on whether we are being built for C bindings /// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is /// no problem - you move a `Score` that implements `Writeable` into a `Mutex`, lock it, and now diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index aac83f42a3c..5577eda079e 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -95,6 +95,11 @@ impl Level { pub struct Record<'a> { /// The verbosity level of the message. pub level: Level, + /// The node id of the peer this record is relevant to, if any + pub peer_id: Option<PublicKey>, + /// The channel id this record is relevant to, if any + /// Note: this may be a temporary or a 'real' channel_id + pub channel_id: Option<[u8; 32]>, #[cfg(not(c_bindings))] /// The message body.
pub args: fmt::Arguments<'a>, @@ -119,9 +124,11 @@ impl<'a> Record<'a> { /// /// This is not exported to bindings users as fmt can't be used in C #[inline] - pub fn new(level: Level, args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32) -> Record<'a> { + pub fn new(level: Level, peer_id: Option<PublicKey>, channel_id: Option<[u8; 32]>, args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32) -> Record<'a> { Record { level, + peer_id, + channel_id, #[cfg(not(c_bindings))] args, #[cfg(c_bindings)] @@ -194,12 +201,12 @@ mod tests { } fn call_macros(&self) { - log_error!(self.logger, "This is an error"); - log_warn!(self.logger, "This is a warning"); - log_info!(self.logger, "This is an info"); - log_debug!(self.logger, "This is a debug"); - log_trace!(self.logger, "This is a trace"); - log_gossip!(self.logger, "This is a gossip"); + _log_error!(self.logger, "This is an error"); + _log_warn!(self.logger, "This is a warning"); + _log_info!(self.logger, "This is an info"); + _log_debug!(self.logger, "This is a debug"); + _log_trace!(self.logger, "This is a trace"); + _log_gossip!(self.logger, "This is a gossip"); } } diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index 8742e8e84d0..7b8832e7d2b 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -150,35 +150,37 @@ macro_rules! log_spendable { $crate::util::macro_logger::DebugSpendable(&$obj) } } - -/// Create a new Record and log it. You probably don't want to use this macro directly, +/// Internal logging. +/// +/// Creates a new Record and logs it. You probably don't want to use this macro directly, /// but it needs to be exported so `log_trace` etc can use it in external crates. #[doc(hidden)] #[macro_export] macro_rules! log_internal { - ($logger: expr, $lvl:expr, $($arg:tt)+) => ( - $logger.log(&$crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!())) + ($logger: expr, $lvl:expr, $peer_id:expr, $channel_id:expr, $($arg:tt)+) => ( - $logger.log(&$crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!())) + $logger.log(&$crate::util::logger::Record::new($lvl, $peer_id, $channel_id, format_args!($($arg)+), module_path!(), file!(), line!())) ); } - /// Logs an entry at the given level. #[doc(hidden)] #[macro_export] -macro_rules! log_given_level { - ($logger: expr, $lvl:expr, $($arg:tt)+) => ( +macro_rules!
__log_given_level { + ($logger: expr, $lvl:expr, $peer_id:expr, $channel_id:expr, $($arg:tt)+) => ( match $lvl { #[cfg(not(any(feature = "max_level_off")))] - $crate::util::logger::Level::Error => $crate::log_internal!($logger, $lvl, $($arg)*), + $crate::util::logger::Level::Error => $crate::log_internal!($logger, $lvl, $peer_id, $channel_id, $($arg)*), #[cfg(not(any(feature = "max_level_off", feature = "max_level_error")))] - $crate::util::logger::Level::Warn => $crate::log_internal!($logger, $lvl, $($arg)*), + $crate::util::logger::Level::Warn => $crate::log_internal!($logger, $lvl, $peer_id, $channel_id, $($arg)*), #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn")))] - $crate::util::logger::Level::Info => $crate::log_internal!($logger, $lvl, $($arg)*), + $crate::util::logger::Level::Info => $crate::log_internal!($logger, $lvl, $peer_id, $channel_id, $($arg)*), #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info")))] - $crate::util::logger::Level::Debug => $crate::log_internal!($logger, $lvl, $($arg)*), + $crate::util::logger::Level::Debug => $crate::log_internal!($logger, $lvl, $peer_id, $channel_id, $($arg)*), #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug")))] - $crate::util::logger::Level::Trace => $crate::log_internal!($logger, $lvl, $($arg)*), + $crate::util::logger::Level::Trace => $crate::log_internal!($logger, $lvl, $peer_id, $channel_id, $($arg)*), #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug", feature = "max_level_trace")))] - $crate::util::logger::Level::Gossip => $crate::log_internal!($logger, $lvl, $($arg)*), + $crate::util::logger::Level::Gossip => $crate::log_internal!($logger, $lvl, $peer_id, $channel_id, $($arg)*), #[cfg(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug", feature = "max_level_trace"))] _ => { @@ -188,50 +190,176 @@ macro_rules! log_given_level { ); } -/// Log at the `ERROR` level. +/// Default Logging +/// +/// Default logging is the basic form of logging, suitable for use throughout the system. However, +/// in some cases developers may want to enforce further context logging in files where it is +/// appropriate, for example logging peer_id and channel_id in `channel*` files. +/// In those cases, context-logging enforcement can be added so that the compiler prompts +/// developers to log the extra values. +/// +/// Example use from the lightning crate: +/// ```ignore +/// use crate::util::macro_logger::default_logging::*; +/// fn main() { +/// log_trace!(logger, "log message {}, {}", arg1, arg2); +/// log_error!(logger, "log message {}, {}", arg1, arg2); +/// } +/// ``` +/// Example use from an external crate: +/// ```ignore +/// use lightning::{_log_error as log_error, _log_trace as log_trace}; +/// fn main() { +/// log_trace!(logger, "log message {}, {}", arg1, arg2); +/// log_error!(logger, "log message {}, {}", arg1, arg2); +/// } +/// ``` +/// +#[doc(hidden)] +#[macro_export] +macro_rules! _log_given_level { + ($logger: expr, $lvl:expr, $($arg:tt)+) => ( + $crate::__log_given_level!($logger, $lvl, None, None, $($arg)*); + ); +} +#[doc(hidden)] #[macro_export] -macro_rules! log_error { +macro_rules!
_log_debug { ($logger: expr, $($arg:tt)*) => ( - $crate::log_given_level!($logger, $crate::util::logger::Level::Error, $($arg)*); + $crate::__log_given_level!($logger, $crate::util::logger::Level::Debug, None, None, $($arg)*); ) } - -/// Log at the `WARN` level. +#[doc(hidden)] #[macro_export] -macro_rules! log_warn { +macro_rules! _log_error { ($logger: expr, $($arg:tt)*) => ( - $crate::log_given_level!($logger, $crate::util::logger::Level::Warn, $($arg)*); + $crate::__log_given_level!($logger, $crate::util::logger::Level::Error, None, None, $($arg)*); ) } - -/// Log at the `INFO` level. +#[doc(hidden)] #[macro_export] -macro_rules! log_info { +macro_rules! _log_warn { ($logger: expr, $($arg:tt)*) => ( - $crate::log_given_level!($logger, $crate::util::logger::Level::Info, $($arg)*); + $crate::__log_given_level!($logger, $crate::util::logger::Level::Warn, None, None, $($arg)*); ) } - -/// Log at the `DEBUG` level. +#[doc(hidden)] #[macro_export] -macro_rules! log_debug { +macro_rules! _log_info { ($logger: expr, $($arg:tt)*) => ( - $crate::log_given_level!($logger, $crate::util::logger::Level::Debug, $($arg)*); + $crate::__log_given_level!($logger, $crate::util::logger::Level::Info, None, None, $($arg)*); ) } - -/// Log at the `TRACE` level. +#[doc(hidden)] #[macro_export] -macro_rules! log_trace { +macro_rules! _log_trace { ($logger: expr, $($arg:tt)*) => ( - $crate::log_given_level!($logger, $crate::util::logger::Level::Trace, $($arg)*) + $crate::__log_given_level!($logger, $crate::util::logger::Level::Trace, None, None, $($arg)*) ) } - -/// Log at the `GOSSIP` level. +#[doc(hidden)] #[macro_export] -macro_rules! log_gossip { +macro_rules! _log_gossip { ($logger: expr, $($arg:tt)*) => ( - $crate::log_given_level!($logger, $crate::util::logger::Level::Gossip, $($arg)*); + $crate::__log_given_level!($logger, $crate::util::logger::Level::Gossip, None, None, $($arg)*); ) } + +pub(crate) mod default_logging { + pub(crate) use _log_given_level as log_given_level; + pub(crate) use _log_debug as log_debug; + pub(crate) use _log_warn as log_warn; + pub(crate) use _log_error as log_error; + pub(crate) use _log_trace as log_trace; + pub(crate) use _log_info as log_info; + pub(crate) use _log_gossip as log_gossip; +} + + +/// Context Logging +/// +/// In some files/modules it is semantically logical to include further context about the +/// operating environment in the logs; currently this includes the public key of the peer as well +/// as the channel_id (temporary or 'real'). +/// Conversely, in `chain*` files the channels and peers aren't yet available or set up, so +/// peer_id and channel_id are irrelevant there and Default Logging (above) should be used instead. +/// +/// Example use within the lightning crate: +/// ```ignore +/// use crate::util::macro_logger::context_logging::*; +/// +/// fn main() { +/// log_trace!(logger, peer_id, channel_id, "log message {}, {}", arg1, arg2); +/// log_error!(logger, peer_id, channel_id, "log message {}, {}", arg1, arg2); +/// } +/// ``` +/// Example use from an external crate: +/// ```ignore +/// use lightning::{_log_trace_with_context as log_trace, _log_error_with_context as log_error}; +/// +/// fn main() { +/// log_trace!(logger, peer_id, channel_id, "log message {}, {}", arg1, arg2); +/// log_error!(logger, peer_id, channel_id, "log message {}, {}", arg1, arg2); +/// } +/// ``` +/// +#[doc(hidden)] +#[macro_export] +macro_rules!
_log_given_level_with_context { + ($logger: expr, $lvl:expr, $peer_id:expr, $channel_id:expr, $($arg:tt)+) => ( + $crate::__log_given_level!($logger, $lvl, $peer_id, $channel_id, $($arg)*); + ); +} +#[doc(hidden)] +#[macro_export] +macro_rules! _log_error_with_context { + ($logger: expr, $peer_id:expr, $channel_id:expr, $($arg:tt)*) => ( + $crate::__log_given_level!($logger, $crate::util::logger::Level::Error, $peer_id, $channel_id, $($arg)*); + ) +} +#[doc(hidden)] +#[macro_export] +macro_rules! _log_warn_with_context { + ($logger: expr, $peer_id:expr, $channel_id:expr, $($arg:tt)*) => ( + $crate::__log_given_level!($logger, $crate::util::logger::Level::Warn, $peer_id, $channel_id, $($arg)*); ) } +#[doc(hidden)] +#[macro_export] +macro_rules! _log_info_with_context { + ($logger: expr, $peer_id:expr, $channel_id:expr, $($arg:tt)*) => ( + $crate::__log_given_level!($logger, $crate::util::logger::Level::Info, $peer_id, $channel_id, $($arg)*); + ) +} +#[doc(hidden)] +#[macro_export] +macro_rules! _log_debug_with_context { + ($logger: expr, $peer_id:expr, $channel_id:expr, $($arg:tt)*) => ( + $crate::__log_given_level!($logger, $crate::util::logger::Level::Debug, $peer_id, $channel_id, $($arg)*); + ) +} +#[doc(hidden)] +#[macro_export] +macro_rules! _log_trace_with_context { + ($logger: expr, $peer_id:expr, $channel_id:expr, $($arg:tt)*) => ( + $crate::__log_given_level!($logger, $crate::util::logger::Level::Trace, $peer_id, $channel_id, $($arg)*); + ) +} +#[doc(hidden)] +#[macro_export] +macro_rules! _log_gossip_with_context { + ($logger: expr, $peer_id:expr, $channel_id:expr, $($arg:tt)*) => ( + $crate::__log_given_level!($logger, $crate::util::logger::Level::Gossip, $peer_id, $channel_id, $($arg)*); + ) +} + +pub(crate) mod context_logging { + #![allow(unused_imports)] + pub(crate) use _log_given_level_with_context as log_given_level; + pub(crate) use _log_debug_with_context as log_debug; + pub(crate) use _log_warn_with_context as log_warn; + pub(crate) use _log_error_with_context as log_error; + pub(crate) use _log_trace_with_context as log_trace; + pub(crate) use _log_info_with_context as log_info; + pub(crate) use _log_gossip_with_context as log_gossip; +}
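
For downstream implementors, the practical effect of the new `Record` fields is that a `Logger` can key its output or filtering on the peer and channel a message concerns, rather than parsing them out of the message string. Below is a minimal sketch, assuming a `lightning` build that includes the `peer_id`/`channel_id` fields added above; `ConsoleLogger` is a hypothetical name, not part of this diff:

```rust
use lightning::util::logger::{Logger, Record};

/// Hypothetical logger that prefixes each message with its peer/channel
/// context so output can be grepped per channel.
struct ConsoleLogger;

impl Logger for ConsoleLogger {
	fn log(&self, record: &Record) {
		// Both fields are optional; call sites with no relevant peer or
		// channel (e.g. block-connection logging) pass `None, None`.
		let peer = record.peer_id
			.map(|pk| pk.to_string())
			.unwrap_or_else(|| "-".to_string());
		let chan = record.channel_id
			.map(|id| id.iter().map(|b| format!("{:02x}", b)).collect::<String>())
			.unwrap_or_else(|| "-".to_string());
		println!("{} [{}:{}] peer={} chan={} {}",
			record.level, record.module_path, record.line, peer, chan, record.args);
	}
}
```

With such a logger, the call sites changed above (for example the `channel_ready` and `announcement_signatures` traces) come out tagged with the counterparty and channel as structured data instead of only embedding them in the formatted message.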
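Inside the crate, choosing between the two styles is just a matter of which glob import a module pulls in, mirroring the `use crate::util::macro_logger::default_logging::*;` lines this diff adds to `onion_message`, `routing`, and friends. A sketch of a channel-aware module opting into the context variants; `on_channel_ready`, `logger`, `peer`, and `chan_id` are placeholder names:

```rust
use core::ops::Deref;
use bitcoin::secp256k1::PublicKey;
use crate::util::logger::Logger;
// Context re-exports: every call site must now state its peer/channel
// (or explicitly pass `None`).
use crate::util::macro_logger::context_logging::*;

fn on_channel_ready<L: Deref>(logger: &L, peer: PublicKey, chan_id: [u8; 32])
	where L::Target: Logger
{
	// Same underlying __log_given_level!/log_internal! chain as the default
	// macros, but with the context threaded through to Record::new.
	log_trace!(logger, Some(peer), Some(chan_id),
		"Sending channel_ready for channel {:?}", chan_id);
}
```

The design choice here is that both module flavors keep writing `log_trace!` and friends; only the aliased re-export behind the name changes, so converting a module to context logging is a one-line import swap plus whatever extra arguments the compiler then demands.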
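From outside the crate, the underscore-prefixed names are the exported surface, and the alias-on-`use` pattern shown in the doc examples above keeps call sites looking unchanged. A sketch of a hypothetical consumer crate; `report_sync`, `log_warn_ctx`, and the message text are invented for illustration:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::util::logger::Logger;
// Alias the exported macros back to familiar names at the crate boundary.
use lightning::{_log_info as log_info, _log_warn_with_context as log_warn_ctx};

fn report_sync<L: Logger>(logger: &L, peer: PublicKey) {
	// Default form: no context, the macro fills in `None, None`.
	log_info!(logger, "Gossip sync complete");
	// Context form: tag the record with the offending peer, no channel.
	log_warn_ctx!(logger, Some(peer), None, "Peer {} sent stale gossip", peer);
}
```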