diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a6cce74dd7e..c293a0cc5af 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1480,7 +1480,6 @@ macro_rules! handle_error {
 					}, None));
 				}
 			}
-			log_error!($self.logger, None, None, "{}", err.err);

 			if let msgs::ErrorAction::IgnoreError = err.action {
 			} else {
@@ -1536,7 +1535,7 @@ macro_rules! convert_chan_err {
 			(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
 		},
 		ChannelError::Close(msg) => {
-			log_error!($self.logger, None, None, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+			log_error!($self.logger, None, Some($channel_id.clone()), "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
 			update_maps_on_chan_removal!($self, $channel);
 			let shutdown_res = $channel.force_shutdown(true);
 			(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
@@ -1692,12 +1691,12 @@ macro_rules! handle_new_monitor_update {
 		debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
 		match $update_res {
 			ChannelMonitorUpdateStatus::InProgress => {
-				log_debug!($self.logger, None, None, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+				log_debug!($self.logger, None, Some($chan.channel_id()), "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					log_bytes!($chan.channel_id()[..]));
 				Ok(())
 			},
 			ChannelMonitorUpdateStatus::PermanentFailure => {
-				log_error!($self.logger, None, None, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
+				log_error!($self.logger, None, Some($chan.channel_id()), "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 					log_bytes!($chan.channel_id()[..]));
 				update_maps_on_chan_removal!($self, $chan);
 				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
@@ -2227,7 +2226,7 @@ where
 				return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
 			}
 		};
-		log_error!(self.logger, None, None, "Force-closing channel {}", log_bytes!(channel_id[..]));
+		log_error!(self.logger, Some(*peer_node_id), Some(*channel_id), "Force-closing channel {}", log_bytes!(channel_id[..]));
 		self.finish_force_close_channel(chan.force_shutdown(broadcast));
 		if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 			let mut peer_state = peer_state_mutex.lock().unwrap();
@@ -2398,7 +2397,7 @@ where
 		macro_rules! return_malformed_err {
 			($msg: expr, $err_code: expr) => {
 				{
-					log_info!(self.logger, None, None, "Failed to accept/forward incoming HTLC: {}", $msg);
+					log_info!(self.logger, None, Some(msg.channel_id), "Failed to accept/forward incoming HTLC: {}", $msg);
 					return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
@@ -2429,7 +2428,7 @@ where
 		macro_rules! return_err {
 			($msg: expr, $err_code: expr, $data: expr) => {
 				{
-					log_info!(self.logger, None, None, "Failed to accept/forward incoming HTLC: {}", $msg);
+					log_info!(self.logger, None, Some(msg.channel_id), "Failed to accept/forward incoming HTLC: {}", $msg);
 					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
@@ -2656,7 +2655,7 @@ where
 		if chan.get_short_channel_id().is_none() {
 			return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
 		}
-		log_trace!(self.logger, None, None, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+		log_trace!(self.logger, None, Some(chan.channel_id()), "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
 		self.get_channel_update_for_unicast(chan)
 	}

@@ -2672,7 +2671,7 @@ where
 	/// [`channel_update`]: msgs::ChannelUpdate
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
 	fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		log_trace!(self.logger, None, None, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
+		log_trace!(self.logger, None, Some(chan.channel_id()), "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
 		let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
 			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
 			Some(id) => id,
@@ -2681,7 +2680,7 @@ where
 		self.get_channel_update_for_onion(short_channel_id, chan)
 	}
 	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		log_trace!(self.logger, None, None, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
+		log_trace!(self.logger, None, Some(chan.channel_id()), "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
 		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];

 		let enabled = chan.is_usable() && match chan.channel_update_status() {
@@ -2724,8 +2723,7 @@ where
 	fn send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
 		// The top-level caller should hold the total_consistency_lock read lock.
 		debug_assert!(self.total_consistency_lock.try_write().is_err());
-
-		log_trace!(self.logger, None, None, "Attempting to send payment for path with next hop {}", path.hops.first().unwrap().short_channel_id);
+		log_trace!(self.logger, Some(path.hops.first().unwrap().pubkey), None, "Attempting to send payment for path with next hop {}", path.hops.first().unwrap().short_channel_id);
 		let prng_seed = self.entropy_source.get_secure_random_bytes();
 		let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");

@@ -3426,7 +3424,7 @@ where
 									routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _,
 								},
 							}) => {
-								log_trace!(self.logger, None, None, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
+								log_trace!(self.logger, None, Some(chan.key().clone()), "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
 								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 									short_channel_id: prev_short_channel_id,
 									outpoint: prev_funding_outpoint,
@@ -3440,7 +3438,7 @@ where
 									onion_packet, &self.logger)
 								{
 									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self.logger, None, None, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
+										log_trace!(self.logger, None, Some(chan.key().clone()), "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
 									} else {
 										panic!("Stated return value requirements in send_htlc() were not met");
 									}
@@ -3456,12 +3454,12 @@ where
 								panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
 							},
 							HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-								log_trace!(self.logger, None, None, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+								log_trace!(self.logger, None, Some(chan.key().clone()), "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
 								if let Err(e) = chan.get_mut().queue_fail_htlc(
 									htlc_id, err_packet, &self.logger
 								) {
 									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self.logger, None, None, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+										log_trace!(self.logger, None, Some(chan.key().clone()), "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
 									} else {
 										panic!("Stated return value requirements in queue_fail_htlc() were not met");
 									}
@@ -3797,16 +3795,16 @@ where
 		if !chan.is_outbound() { return NotifyOption::SkipPersist; }
 		// If the feerate has decreased by less than half, don't bother
 		if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() {
-			log_trace!(self.logger, None, None, "Channel {} does not qualify for a feerate change from {} to {}.",
+			log_trace!(self.logger, None, Some(*chan_id), "Channel {} does not qualify for a feerate change from {} to {}.",
				log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
 			return NotifyOption::SkipPersist;
 		}
 		if !chan.is_live() {
-			log_trace!(self.logger, None, None, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+			log_trace!(self.logger, None, Some(*chan_id), "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
				log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
 			return NotifyOption::SkipPersist;
 		}
-		log_trace!(self.logger, None, None, "Channel {} qualifies for a feerate change from {} to {}.",
+		log_trace!(self.logger, None, Some(*chan_id), "Channel {} qualifies for a feerate change from {} to {}.",
			log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);

 		chan.queue_update_fee(new_feerate, &self.logger);
@@ -4347,7 +4345,7 @@ where
 			if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
 				if let Some(action) = completion_action(Some(htlc_value_msat)) {
-					log_trace!(self.logger, None, None, "Tracking monitor update completion action for channel {}: {:?}",
+					log_trace!(self.logger, Some(counterparty_node_id), Some(chan_id), "Tracking monitor update completion action for channel {}: {:?}",
						log_bytes!(chan_id), action);
 					peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 				}
@@ -4359,7 +4357,7 @@ where
 				// TODO: This is a *critical* error - we probably updated the outbound edge
 				// of the HTLC's monitor with a preimage. We should retry this monitor
 				// update over and over again until morale improves.
-				log_error!(self.logger, None, None, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+				log_error!(self.logger, Some(counterparty_node_id), Some(chan_id), "Failed to update channel monitor with preimage {:?}", payment_preimage);
 				return Err((counterparty_node_id, e));
 			}
 		}
@@ -4462,7 +4460,7 @@ where
 		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
 		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
 	-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
-		log_trace!(self.logger, None, None, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+		log_trace!(self.logger, None, Some(channel.channel_id()), "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
			log_bytes!(channel.channel_id()),
			if raa.is_some() { "an" } else { "no" },
			if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
@@ -4556,7 +4554,7 @@ where
 				hash_map::Entry::Vacant(_) => return,
 			}
 		};
-		log_trace!(self.logger, None, None, "ChannelMonitor updated to {}. Current highest is {}",
+		log_trace!(self.logger, None, Some(funding_txo.to_channel_id()), "ChannelMonitor updated to {}. Current highest is {}",
			highest_applied_update_id, channel.get().get_latest_monitor_update_id());
 		if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
 			return;
@@ -4928,7 +4926,7 @@ where
 				let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
					self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
 				if let Some(announcement_sigs) = announcement_sigs_opt {
-					log_trace!(self.logger, None, None, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+					log_trace!(self.logger, Some(*counterparty_node_id), Some(chan.get().channel_id()), "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
 					peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 						node_id: counterparty_node_id.clone(),
 						msg: announcement_sigs,
@@ -4939,7 +4937,7 @@ where
 					// counterparty's announcement_signatures. Thus, we only bother to send a
 					// channel_update here if the channel is not public, i.e. we're not sending an
 					// announcement_signatures.
-					log_trace!(self.logger, None, None, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+					log_trace!(self.logger, Some(*counterparty_node_id), Some(*chan.key()), "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
 					if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
 						peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
 							node_id: counterparty_node_id.clone(),
@@ -4974,7 +4972,7 @@ where
 			hash_map::Entry::Occupied(mut chan_entry) => {

 				if !chan_entry.get().received_shutdown() {
-					log_info!(self.logger, None, None, "Received a shutdown message from our counterparty for channel {}{}.",
+					log_info!(self.logger, Some(*counterparty_node_id), Some(msg.channel_id), "Received a shutdown message from our counterparty for channel {}{}.",
						log_bytes!(msg.channel_id),
						if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
 				}
@@ -5046,7 +5044,7 @@ where
 			}
 		};
 		if let Some(broadcast_tx) = tx {
-			log_info!(self.logger, None, None, "Broadcasting {}", log_tx!(broadcast_tx));
+			log_info!(self.logger, Some(*counterparty_node_id), None, "Broadcasting {}", log_tx!(broadcast_tx));
 			self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
 		}
 		if let Some(chan) = chan_option {
@@ -5403,7 +5401,7 @@ where
 					if were_node_one == msg_from_node_one {
 						return Ok(NotifyOption::SkipPersist);
 					} else {
-						log_debug!(self.logger, None, None, "Received channel_update for channel {}.", log_bytes!(chan_id));
+						log_debug!(self.logger, Some(*counterparty_node_id), Some(chan_id), "Received channel_update for channel {}.", log_bytes!(chan_id));
 						try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
 					}
 				},
@@ -5488,7 +5486,7 @@ where
 					log_trace!(self.logger, None, None, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
 					self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
 				} else {
-					log_trace!(self.logger, None, None, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+					log_trace!(self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()), "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 					let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
 					let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 					self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
@@ -5656,7 +5654,7 @@ where
 						self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);

-						log_info!(self.logger, None, None, "Broadcasting {}", log_tx!(tx));
+						log_info!(self.logger, None, Some(chan.channel_id()), "Broadcasting {}", log_tx!(tx));
 						self.tx_broadcaster.broadcast_transactions(&[&tx]);
 						update_maps_on_chan_removal!(self, chan);
 						false
@@ -5976,14 +5974,14 @@ where
 						// Check that, while holding the peer lock, we don't have another event
 						// blocking any monitor updates for this channel. If we do, let those
 						// events be the ones that ultimately release the monitor update(s).
-						log_trace!(self.logger, None, None, "Delaying monitor unlock for channel {} as another event is pending",
+						log_trace!(self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()), "Delaying monitor unlock for channel {} as another event is pending",
							log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
 						break;
 					}
 					if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
 						debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
 						if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
-							log_debug!(self.logger, None, None, "Unlocking monitor updating for channel {} and updating monitor",
+							log_debug!(self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()), "Unlocking monitor updating for channel {} and updating monitor",
								log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
 							let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, monitor_update);
 							let update_id = monitor_update.update_id;
@@ -5998,12 +5996,12 @@ where
 								continue;
 							}
 						} else {
-							log_trace!(self.logger, None, None, "Unlocked monitor updating for channel {} without monitors to update",
+							log_trace!(self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()), "Unlocked monitor updating for channel {} without monitors to update",
								log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
 						}
 					}
 				} else {
-					log_debug!(self.logger, None, None,
+					log_debug!(self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()),
						"Got a release post-RAA monitor update for peer {} but the channel is gone",
						log_pubkey!(counterparty_node_id));
 				}
@@ -6294,7 +6292,7 @@ where
 				if let Some(channel_ready) = channel_ready_opt {
 					send_channel_ready!(self, pending_msg_events, channel, channel_ready);
 					if channel.is_usable() {
-						log_trace!(self.logger, None, None, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+						log_trace!(self.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
 						if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
 							pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
 								node_id: channel.get_counterparty_node_id(),
@@ -6302,7 +6300,7 @@ where
 							});
 						}
 					} else {
-						log_trace!(self.logger, None, None, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+						log_trace!(self.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
 					}
 				}

@@ -6312,7 +6310,7 @@ where
 				}

 				if let Some(announcement_sigs) = announcement_sigs {
-					log_trace!(self.logger, None, None, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+					log_trace!(self.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
 					pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 						node_id: channel.get_counterparty_node_id(),
 						msg: announcement_sigs,
@@ -6594,7 +6592,7 @@ where
 		let mut failed_channels = Vec::new();
 		let mut per_peer_state = self.per_peer_state.write().unwrap();
 		let remove_peer = {
-			log_debug!(self.logger, None, None, "Marking channels with {} disconnected and generating channel_updates.",
+			log_debug!(self.logger, Some(*counterparty_node_id), None, "Marking channels with {} disconnected and generating channel_updates.",
				log_pubkey!(counterparty_node_id));
 			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -6668,7 +6666,7 @@ where
 	fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
 		if !init_msg.features.supports_static_remote_key() {
-			log_debug!(self.logger, None, None, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
+			log_debug!(self.logger, Some(*counterparty_node_id), None, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
 			return Err(());
 		}

@@ -6713,7 +6711,7 @@ where
 				},
 			}
 		}
-		log_debug!(self.logger, None, None, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+		log_debug!(self.logger, Some(*counterparty_node_id), None, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));

 		let per_peer_state = self.per_peer_state.read().unwrap();
 		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
@@ -7751,22 +7749,22 @@ where
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
 				if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
 					// If the channel is ahead of the monitor, return InvalidValue:
-					log_error!(args.logger, None, None, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
-					log_error!(args.logger, None, None, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
-					log_error!(args.logger, None, None, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-					log_error!(args.logger, None, None, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-					log_error!(args.logger, None, None, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-					log_error!(args.logger, None, None, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Without the latest ChannelMonitor we cannot continue without risking funds.");
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
 					return Err(DecodeError::InvalidValue);
 				} else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
						channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
 					// But if the channel is behind of the monitor, close the channel:
-					log_error!(args.logger, None, None, "A ChannelManager is stale compared to the current ChannelMonitor!");
-					log_error!(args.logger, None, None, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
-					log_error!(args.logger, None, None, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "A ChannelManager is stale compared to the current ChannelMonitor!");
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+					log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 					let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
 					if let Some(monitor_update) = monitor_update {
@@ -7791,14 +7789,14 @@ where
 							// claim update ChannelMonitor updates were persisted prior to persisting
 							// the ChannelMonitor update for the forward leg, so attempting to fail the
 							// backwards leg of the HTLC will simply be rejected.
-							log_info!(args.logger, None, None,
+							log_info!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()),
								"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
								log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
 							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
 						}
 					}
 				} else {
-					log_info!(args.logger, None, None, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
+					log_info!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
 					if let Some(short_channel_id) = channel.get_short_channel_id() {
 						short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
 					}
@@ -7828,18 +7826,18 @@ where
 					reason: ClosureReason::DisconnectedPeer,
 				}, None));
 			} else {
-				log_error!(args.logger, None, None, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
-				log_error!(args.logger, None, None, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-				log_error!(args.logger, None, None, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-				log_error!(args.logger, None, None, " Without the ChannelMonitor we cannot continue without risking funds.");
-				log_error!(args.logger, None, None, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+				log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+				log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+				log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+				log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Without the ChannelMonitor we cannot continue without risking funds.");
+				log_error!(args.logger, Some(channel.get_counterparty_node_id()), Some(channel.channel_id()), " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
 				return Err(DecodeError::InvalidValue);
 			}
 		}

 		for (funding_txo, _) in args.channel_monitors.iter() {
 			if !funding_txo_set.contains(funding_txo) {
-				log_info!(args.logger, None, None, "Queueing monitor update to ensure missing channel {} is force closed",
+				log_info!(args.logger, None, Some(funding_txo.to_channel_id()), "Queueing monitor update to ensure missing channel {} is force closed",
					log_bytes!(funding_txo.to_channel_id()));
 				let monitor_update = ChannelMonitorUpdate {
					update_id: CLOSED_CHANNEL_UPDATE_ID,
@@ -8001,11 +7999,12 @@ where
 			// We only rebuild the pending payments map if we were most recently serialized by
 			// 0.0.102+
 			for (_, monitor) in args.channel_monitors.iter() {
-				if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+				let chan_id = monitor.get_funding_txo().0.to_channel_id();
+				if id_to_peer.get(&chan_id).is_none() {
 					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
 						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
 							if path.hops.is_empty() {
-								log_error!(args.logger, None, None, "Got an empty path for a pending payment");
+								log_error!(args.logger, None, Some(chan_id), "Got an empty path for a pending payment");
 								return Err(DecodeError::InvalidValue);
 							}
@@ -8015,7 +8014,7 @@ where
 							match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
								hash_map::Entry::Occupied(mut entry) => {
									let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
-									log_info!(args.logger, None, None, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
+									log_info!(args.logger, None, Some(chan_id), "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
										if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
								},
								hash_map::Entry::Vacant(entry) => {
@@ -8034,7 +8033,7 @@ where
										total_msat: path_amt,
										starting_block_height: best_block_height,
									});
-									log_info!(args.logger, None, None, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
+									log_info!(args.logger, None, Some(chan_id), "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
										path_amt, log_bytes!(htlc.payment_hash.0), log_bytes!(session_priv_bytes));
								}
							}
@@ -8056,7 +8055,7 @@ where
 					forwards.retain(|forward| {
						if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
							if pending_forward_matches_htlc(&htlc_info) {
-								log_info!(args.logger, None, None, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+								log_info!(args.logger, None, Some(chan_id), "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
									log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
								false
							} else { true }
@@ -8066,7 +8065,7 @@ where
 					});
 					pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
						if pending_forward_matches_htlc(&htlc_info) {
-							log_info!(args.logger, None, None, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+							log_info!(args.logger, None, Some(chan_id), "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
								log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
							pending_events_read.retain(|(event, _)| {
								if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
@@ -8201,14 +8200,14 @@ where
 				} else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
 					// Note that in rare cases its possible to hit this while reading an older
 					// channel if we just happened to pick a colliding outbound alias above.
-					log_error!(args.logger, None, None, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+					log_error!(args.logger, Some(*_peer_node_id), Some(*chan_id), "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
 					return Err(DecodeError::InvalidValue);
 				}
 				if chan.is_usable() {
 					if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
						// Note that in rare cases its possible to hit this while reading an older
						// channel if we just happened to pick a colliding outbound alias above.
-						log_error!(args.logger, None, None, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+						log_error!(args.logger, Some(*_peer_node_id), Some(*chan_id), "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
						return Err(DecodeError::InvalidValue);
					}
				}
@@ -8274,7 +8273,7 @@ where
 			if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
				peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
 			} else {
-				log_error!(args.logger, None, None, "Got blocked actions without a per-peer-state for {}", node_id);
+				log_error!(args.logger, Some(node_id), None, "Got blocked actions without a per-peer-state for {}", node_id);
				return Err(DecodeError::InvalidValue);
 			}
 		}
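
Every hunk makes the same mechanical change: the `log_error!`/`log_debug!`/`log_info!`/`log_trace!` call sites gain two leading context arguments, an `Option` counterparty node id and an `Option` channel id, and the placeholder `None, None` pairs are replaced with real values wherever the surrounding scope has them. The macro and record definitions themselves are not part of this file, so the sketch below is only a minimal, self-contained approximation of the calling convention for reviewers; the `Record` field names, the `StdoutLogger`, and `main` are illustrative assumptions, not rust-lightning's actual `lightning::util::logger` API.

```rust
use std::fmt;

/// Stand-in for `bitcoin::secp256k1::PublicKey`, only so the sketch compiles
/// on its own.
#[derive(Clone, Copy)]
pub struct PublicKey(pub [u8; 33]);

/// A log record carrying the optional per-peer and per-channel context that
/// the diff threads through every call site. Field names are assumptions.
pub struct Record<'a> {
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<[u8; 32]>,
	pub args: fmt::Arguments<'a>,
}

pub trait Logger {
	fn log(&self, record: &Record);
}

/// Sketch of the three-context-argument shape used throughout the diff:
/// `log_error!(logger, Some(peer), Some(chan_id), "fmt {}", args)`.
macro_rules! log_error {
	($logger: expr, $peer: expr, $chan: expr, $($arg: tt)*) => {
		$logger.log(&Record {
			peer_id: $peer,
			channel_id: $chan,
			args: format_args!($($arg)*),
		})
	};
}

/// A logger that prefixes each line with whatever context is attached,
/// which is the payoff of passing it structurally instead of in the text.
struct StdoutLogger;

impl Logger for StdoutLogger {
	fn log(&self, record: &Record) {
		let chan = record.channel_id
			.map(|id| id.iter().map(|b| format!("{:02x}", b)).collect::<String>())
			.unwrap_or_else(|| "-".to_owned());
		let peer = record.peer_id
			.map(|p| format!("{:02x}..", p.0[0]))
			.unwrap_or_else(|| "-".to_owned());
		println!("[peer {}] [chan {}] {}", peer, chan, record.args);
	}
}

fn main() {
	let logger = StdoutLogger;
	let channel_id = [0x2a; 32];
	// Mirrors a call site from the diff, with per-channel context attached:
	log_error!(logger, None::<PublicKey>, Some(channel_id), "Force-closing channel {}", 42);
}
```

The design point the diff encodes at each call site is that the peer/channel context travels with the record itself, so a `Logger` implementation can filter, route, or tag output per channel without parsing the formatted message text.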