Add counterparty_node_id & channel_capacity to ChannelClosed event #2387

Merged
8 changes: 4 additions & 4 deletions lightning-persister/src/lib.rs
@@ -237,7 +237,7 @@ mod tests {
// Force close because cooperative close doesn't result in any persisted
// updates.
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);

@@ -246,7 +246,7 @@ mod tests {

connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);

// Make sure everything is persisted as expected after close.
@@ -270,7 +270,7 @@ mod tests {
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
@@ -309,7 +309,7 @@ mod tests {
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
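Across the test suites touched by this PR, `check_closed_event!` takes two additional arguments: the expected counterparty node id(s) and the expected channel capacity in satoshis. A minimal before/after sketch of the call pattern, lifted from the hunks above (the macro itself is defined in the functional-test utilities and is not shown in this diff):

```rust
// Old form: node under test, expected event count, and the closure reason.
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);

// New form: additionally assert the counterparty node id(s) and the channel
// capacity in sats (100000 is the channel size used by these tests).
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed,
	[nodes[1].node.get_our_node_id()], 100000);
```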
6 changes: 4 additions & 2 deletions lightning/src/chain/chainmonitor.rs
@@ -966,7 +966,8 @@ mod tests {
assert!(err.contains("ChannelMonitor storage failure")));
check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
check_closed_broadcast!(nodes[0], true);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
[nodes[1].node.get_our_node_id()], 100000);

// However, as the ChainMonitor is still waiting for the original persistence to complete,
// it won't yet release the MonitorEvents.
@@ -1013,7 +1014,8 @@ mod tests {
// ... however once we get events once, the channel will close, creating a channel-closed
// ChannelMonitorUpdate.
check_closed_broadcast!(nodes[0], true);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() },
[nodes[1].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[0], 1);
}
}
3 changes: 2 additions & 1 deletion lightning/src/chain/channelmonitor.rs
@@ -4276,7 +4276,8 @@ mod tests {
assert!(err.contains("ChannelMonitor storage failure")));
check_added_monitors!(nodes[1], 2); // After the failure we generate a close-channel monitor update
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
[nodes[0].node.get_our_node_id()], 100000);

// Build a new ChannelMonitorUpdate which contains both the failing commitment tx update
// and provides the claim preimages for the two pending HTLCs. The first update generates
23 changes: 20 additions & 3 deletions lightning/src/events/mod.rs
@@ -755,7 +755,15 @@ pub enum Event {
/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
user_channel_id: u128,
/// The reason the channel was closed.
reason: ClosureReason
reason: ClosureReason,
/// Counterparty in the closed channel.
Collaborator
You have quite a bit of whitespace at the ends of lines in your patch; a local `git show` should highlight it for you.

///
/// This field will be `None` for objects serialized prior to LDK 0.0.117.
counterparty_node_id: Option<PublicKey>,
/// Channel capacity of the closing channel (sats).
///
/// This field will be `None` for objects serialized prior to LDK 0.0.117.
channel_capacity_sats: Option<u64>,
},
/// Used to indicate to the user that they can abandon the funding transaction and recycle the
/// inputs for another purpose.
@@ -956,7 +964,9 @@ impl Writeable for Event {
(5, outbound_amount_forwarded_msat, option),
});
},
&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason } => {
&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason,
ref counterparty_node_id, ref channel_capacity_sats
} => {
9u8.write(writer)?;
// `user_channel_id` used to be a single u64 value. In order to remain backwards
// compatible with versions prior to 0.0.113, the u128 is serialized as two
@@ -968,6 +978,8 @@ impl Writeable for Event {
(1, user_channel_id_low, required),
(2, reason, required),
(3, user_channel_id_high, required),
(5, counterparty_node_id, option),
(7, channel_capacity_sats, option),
});
},
&Event::DiscardFunding { ref channel_id, ref transaction } => {
@@ -1252,11 +1264,15 @@ impl MaybeReadable for Event {
let mut reason = UpgradableRequired(None);
let mut user_channel_id_low_opt: Option<u64> = None;
let mut user_channel_id_high_opt: Option<u64> = None;
let mut counterparty_node_id = None;
let mut channel_capacity_sats = None;
read_tlv_fields!(reader, {
(0, channel_id, required),
(1, user_channel_id_low_opt, option),
(2, reason, upgradable_required),
(3, user_channel_id_high_opt, option),
(5, counterparty_node_id, option),
(7, channel_capacity_sats, option),
});

// `user_channel_id` used to be a single u64 value. In order to remain
@@ -1265,7 +1281,8 @@ impl MaybeReadable for Event {
let user_channel_id = (user_channel_id_low_opt.unwrap_or(0) as u128) +
((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);

Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required) }))
Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required),
counterparty_node_id, channel_capacity_sats }))
};
f()
},
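For downstream consumers, the two new fields surface on `Event::ChannelClosed` as `Option`s, since events serialized prior to LDK 0.0.117 carry neither. A minimal sketch of an event-handler branch using the field names from the hunk above (the handler function and its logging are hypothetical, not part of this PR):

```rust
use lightning::events::Event;

// Hypothetical consumer-side handler; only the ChannelClosed arm is relevant here.
fn log_channel_close(event: &Event) {
	if let Event::ChannelClosed {
		channel_id, user_channel_id, reason, counterparty_node_id, channel_capacity_sats
	} = event {
		println!("channel {:?} (user id {}) closed: {:?}", channel_id, user_channel_id, reason);
		// Both new fields are None for events serialized prior to LDK 0.0.117.
		if let Some(node_id) = counterparty_node_id {
			println!("  counterparty: {}", node_id);
		}
		if let Some(capacity_sats) = channel_capacity_sats {
			println!("  capacity: {} sats", capacity_sats);
		}
	}
}
```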
25 changes: 14 additions & 11 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -70,7 +70,8 @@ fn test_simple_monitor_permanent_update_fail() {
// PaymentPathFailed event

assert_eq!(nodes[0].node.list_channels().len(), 0);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
@@ -247,7 +248,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
// PaymentPathFailed event

assert_eq!(nodes[0].node.list_channels().len(), 0);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
@@ -1987,8 +1988,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:

send_payment(&nodes[0], &[&nodes[1]], 8000000);
close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]
@@ -2188,7 +2189,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);

mine_transaction(&nodes[1], &bs_txn[0]);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[1], true);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
@@ -2666,8 +2667,8 @@ fn test_temporary_error_during_shutdown() {
assert_eq!(txn_a, txn_b);
assert_eq!(txn_a.len(), 1);
check_spends!(txn_a[0], funding_tx);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
@@ -2696,7 +2697,8 @@ fn test_permanent_error_during_sending_shutdown() {
if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

check_added_monitors!(nodes[0], 2);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
@@ -2727,7 +2729,8 @@ fn test_permanent_error_during_handling_shutdown() {
if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

check_added_monitors!(nodes[1], 2);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
[nodes[0].node.get_our_node_id()], 100000);
}

#[test]
@@ -2921,7 +2924,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
nodes[0].chain_source.watched_outputs.lock().unwrap().clear();

reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
assert!(nodes[0].node.list_channels().is_empty());
}

@@ -3008,7 +3011,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo

reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);

check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
assert!(nodes[1].node.list_channels().is_empty());
}

43 changes: 28 additions & 15 deletions lightning/src/ln/channelmanager.rs
@@ -376,6 +376,7 @@ struct MsgHandleErrInternal {
err: msgs::LightningError,
chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
channel_capacity: Option<u64>,
}
impl MsgHandleErrInternal {
#[inline]
@@ -392,14 +393,15 @@ impl MsgHandleErrInternal {
},
chan_id: None,
shutdown_finish: None,
channel_capacity: None,
}
}
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
Self { err, chan_id: None, shutdown_finish: None }
Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
}
#[inline]
fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
Self {
err: LightningError {
err: err.clone(),
@@ -412,6 +414,7 @@ impl MsgHandleErrInternal {
},
chan_id: Some((channel_id, user_channel_id)),
shutdown_finish: Some((shutdown_res, channel_update)),
channel_capacity: Some(channel_capacity)
}
}
#[inline]
@@ -444,6 +447,7 @@ impl MsgHandleErrInternal {
},
chan_id: None,
shutdown_finish: None,
channel_capacity: None,
}
}
}
@@ -1659,7 +1663,7 @@ macro_rules! handle_error {

match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
let mut msg_events = Vec::with_capacity(2);

if let Some((shutdown_res, update_option)) = shutdown_finish {
@@ -1672,7 +1676,9 @@ macro_rules! handle_error {
if let Some((channel_id, user_channel_id)) = chan_id {
$self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
channel_id, user_channel_id,
reason: ClosureReason::ProcessingError { err: err.err.clone() }
reason: ClosureReason::ProcessingError { err: err.err.clone() },
counterparty_node_id: Some($counterparty_node_id),
channel_capacity_sats: channel_capacity,
}, None));
}
}
@@ -1745,7 +1751,7 @@ macro_rules! convert_chan_err {
update_maps_on_chan_removal!($self, &$channel.context);
let shutdown_res = $channel.context.force_shutdown(true);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis()))
},
}
};
@@ -1758,7 +1764,7 @@ macro_rules! convert_chan_err {
update_maps_on_chan_removal!($self, &$channel_context);
let shutdown_res = $channel_context.force_shutdown(false);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
shutdown_res, None))
shutdown_res, None, $channel_context.get_value_satoshis()))
},
}
}
@@ -1937,7 +1943,7 @@ macro_rules! handle_new_monitor_update {
let res = Err(MsgHandleErrInternal::from_finish_shutdown(
"ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
$chan.context.get_user_id(), $chan.context.force_shutdown(false),
$self.get_channel_update_for_broadcast(&$chan).ok()));
$self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis()));
$remove;
res
},
@@ -2371,7 +2377,9 @@ where
pending_events_lock.push_back((events::Event::ChannelClosed {
channel_id: context.channel_id(),
user_channel_id: context.get_user_id(),
reason: closure_reason
reason: closure_reason,
counterparty_node_id: Some(context.get_counterparty_node_id()),
channel_capacity_sats: Some(context.get_value_satoshis()),
}, None));
}

@@ -3371,7 +3379,8 @@ where
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
let shutdown_res = chan.context.force_shutdown(false);
(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None))
let channel_capacity = chan.context.get_value_satoshis();
(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
} else { unreachable!(); });
match funding_res {
Ok((chan, funding_msg)) => (chan, funding_msg),
@@ -5408,7 +5417,7 @@ where
let user_id = inbound_chan.context.get_user_id();
let shutdown_res = inbound_chan.context.force_shutdown(false);
return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
msg.temporary_channel_id, user_id, shutdown_res, None));
msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis()));
},
}
},
@@ -8356,7 +8365,9 @@ where
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::OutdatedChannelManager
reason: ClosureReason::OutdatedChannelManager,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
}, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
@@ -8408,6 +8419,8 @@ where
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
}, None));
} else {
log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
@@ -9623,7 +9636,7 @@ mod tests {
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);

{
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
@@ -9786,7 +9799,8 @@ mod tests {
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());

check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}

fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
@@ -10182,7 +10195,7 @@ mod tests {
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());

check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
}

#[test]