@@ -3696,57 +3696,53 @@ macro_rules! handle_initial_monitor {
 	};
 }
 
-macro_rules! handle_new_monitor_update_internal {
-	(
-		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
-		$chan_id: expr, $counterparty_node_id: expr, $all_completed: expr
-	) => {{
-		let in_flight_updates = &mut $peer_state
-			.in_flight_monitor_updates
-			.entry($chan_id)
-			.or_insert_with(|| ($funding_txo, Vec::new()))
-			.1;
-		// During startup, we push monitor updates as background events through to here in
-		// order to replay updates that were in-flight when we shut down. Thus, we have to
-		// filter for uniqueness here.
-		let update_idx =
-			in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| {
-				in_flight_updates.push($update);
-				in_flight_updates.len() - 1
-			});
-		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-			let update_res =
-				$self.chain_monitor.update_channel($chan_id, &in_flight_updates[update_idx]);
-			let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger);
-			if update_completed {
-				let _ = in_flight_updates.remove(update_idx);
-				if in_flight_updates.is_empty() {
-					$all_completed;
-				}
-			}
-			update_completed
-		} else {
-			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
-			// fail to persist it. This is a fairly safe assumption, however, since anything we do
-			// during the startup sequence should be replayed exactly if we immediately crash.
-			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-				counterparty_node_id: $counterparty_node_id,
-				funding_txo: $funding_txo,
-				channel_id: $chan_id,
-				update: in_flight_updates[update_idx].clone(),
-			};
-			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
-			// `pending_background_events` to avoid a race condition during
-			// `pending_background_events` processing where we complete one
-			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
-			// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
-			// run post-completion actions.
-			// We could work around that with some effort, but its simpler to just track updates
-			// twice.
-			$self.pending_background_events.lock().unwrap().push(event);
-			false
-		}
-	}};
+fn handle_new_monitor_update_internal<CM: AChannelManager, LG: Logger>(
+	cm: &CM, in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
+	channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey,
+	new_update: ChannelMonitorUpdate, logger: LG,
+) -> (bool, bool) {
+	let in_flight_updates = &mut in_flight_monitor_updates
+		.entry(channel_id)
+		.or_insert_with(|| (funding_txo, Vec::new()))
+		.1;
+	// During startup, we push monitor updates as background events through to here in
+	// order to replay updates that were in-flight when we shut down. Thus, we have to
+	// filter for uniqueness here.
+	let update_idx =
+		in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| {
+			in_flight_updates.push(new_update);
+			in_flight_updates.len() - 1
+		});
+
+	if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) {
+		let update_res =
+			cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]);
+		let update_completed = handle_monitor_update_res(cm, update_res, channel_id, logger);
+		if update_completed {
+			let _ = in_flight_updates.remove(update_idx);
+		}
+		(update_completed, update_completed && in_flight_updates.is_empty())
+	} else {
+		// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+		// fail to persist it. This is a fairly safe assumption, however, since anything we do
+		// during the startup sequence should be replayed exactly if we immediately crash.
+		let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+			counterparty_node_id,
+			funding_txo,
+			channel_id,
+			update: in_flight_updates[update_idx].clone(),
+		};
+		// We want to track the in-flight update both in `in_flight_monitor_updates` and in
+		// `pending_background_events` to avoid a race condition during
+		// `pending_background_events` processing where we complete one
+		// `ChannelMonitorUpdate` (but there are more pending as background events) but we
+		// conclude that all pending `ChannelMonitorUpdate`s have completed and it's safe to
+		// run post-completion actions.
+		// We could work around that with some effort, but it's simpler to just track updates
+		// twice.
+		cm.get_cm().pending_background_events.lock().unwrap().push(event);
+		(false, false)
+	}
 }
 
 macro_rules! handle_post_close_monitor_update {
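
The refactor above replaces the macro's `$all_completed` callback argument with a `(update_completed, all_updates_complete)` return value, letting each caller decide what to do once the channel's in-flight queue drains. A minimal, self-contained sketch of that contract, using invented stand-in types rather than LDK's own:

	use std::collections::BTreeMap;

	type ChannelId = u32;
	type Update = u64;

	/// Dedup-and-track: returns (persisted synchronously, no in-flight updates remain).
	fn track_update(
		in_flight: &mut BTreeMap<ChannelId, Vec<Update>>, chan: ChannelId, update: Update,
		persisted: bool,
	) -> (bool, bool) {
		let updates = in_flight.entry(chan).or_default();
		// Startup replays can hand us an update we already track; dedup first.
		let idx = updates.iter().position(|u| *u == update).unwrap_or_else(|| {
			updates.push(update);
			updates.len() - 1
		});
		if persisted {
			let _ = updates.remove(idx);
		}
		(persisted, persisted && updates.is_empty())
	}

	fn main() {
		let mut in_flight = BTreeMap::new();
		// Async persist: the update is tracked, nothing has completed yet.
		assert_eq!(track_update(&mut in_flight, 1, 10, false), (false, false));
		// The same update replayed and now persisted: removed, channel drained.
		assert_eq!(track_update(&mut in_flight, 1, 10, true), (true, true));
	}

Returning the second flag separately is what lets `handle_new_monitor_update_locked_actions_handled_by_caller` below simply discard it.
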
@@ -3756,26 +3752,23 @@ macro_rules! handle_post_close_monitor_update {
 	) => {{
 		let logger =
 			WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
-		handle_new_monitor_update_internal!(
-			$self,
-			$funding_txo,
-			$update,
-			$peer_state,
+		let in_flight_updates = &mut $peer_state.in_flight_monitor_updates;
+		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
+			$self, in_flight_updates, $channel_id, $funding_txo, $counterparty_node_id, $update,
 			logger,
-			$channel_id,
-			$counterparty_node_id,
-			{
-				let update_actions = $peer_state
-					.monitor_update_blocked_actions
-					.remove(&$channel_id)
-					.unwrap_or(Vec::new());
+		);
+		if all_updates_complete {
+			let update_actions = $peer_state
+				.monitor_update_blocked_actions
+				.remove(&$channel_id)
+				.unwrap_or(Vec::new());
 
-				mem::drop($peer_state_lock);
-				mem::drop($per_peer_state_lock);
+			mem::drop($peer_state_lock);
+			mem::drop($per_peer_state_lock);
 
-				$self.handle_monitor_update_completion_actions(update_actions);
-			}
-		)
+			$self.handle_monitor_update_completion_actions(update_actions);
+		}
+		update_completed
 	}};
 }
 
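Note the ordering this call site preserves from the old macro body: the blocked actions are drained while both locks are held, the guards are dropped, and only then does `handle_monitor_update_completion_actions` run. A toy sketch of the same drop-before-callout pattern, with assumed placeholder state:

	use std::mem;
	use std::sync::Mutex;

	fn run_completion_actions(actions: Vec<String>) {
		for action in actions {
			println!("running: {action}");
		}
	}

	fn main() {
		let per_peer_state = Mutex::new(());
		let peer_state = Mutex::new(vec!["claim HTLC".to_string()]);

		let per_peer_lock = per_peer_state.lock().unwrap();
		let mut peer_lock = peer_state.lock().unwrap();
		// Take the queued actions out while still holding the locks...
		let actions = mem::take(&mut *peer_lock);
		// ...then release both guards before calling out, so the handler is
		// free to re-acquire them without deadlocking.
		mem::drop(peer_lock);
		mem::drop(per_peer_lock);
		run_completion_actions(actions);
	}
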
@@ -3795,16 +3788,11 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller {
 		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
 		let chan_id = $chan_context.channel_id();
 		let counterparty_node_id = $chan_context.get_counterparty_node_id();
-		handle_new_monitor_update_internal!(
-			$self,
-			$funding_txo,
-			$update,
-			$peer_state,
-			logger,
-			chan_id,
-			counterparty_node_id,
-			{}
-		)
+		let in_flight_updates = &mut $peer_state.in_flight_monitor_updates;
+		let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal(
+			$self, in_flight_updates, chan_id, $funding_txo, counterparty_node_id, $update, logger,
+		);
+		update_completed
 	}};
 }
 
@@ -3816,26 +3804,20 @@ macro_rules! handle_new_monitor_update {
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let chan_id = $chan.context.channel_id();
 		let counterparty_node_id = $chan.context.get_counterparty_node_id();
-		handle_new_monitor_update_internal!(
-			$self,
-			$funding_txo,
-			$update,
-			$peer_state,
-			logger,
-			chan_id,
-			counterparty_node_id,
-			{
-				if $chan.blocked_monitor_updates_pending() == 0 {
-					handle_monitor_update_completion!(
-						$self,
-						$peer_state_lock,
-						$peer_state,
-						$per_peer_state_lock,
-						$chan
-					);
-				}
-			}
-		)
+		let in_flight_updates = &mut $peer_state.in_flight_monitor_updates;
+		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
+			$self, in_flight_updates, chan_id, $funding_txo, counterparty_node_id, $update, logger,
+		);
+		if all_updates_complete && $chan.blocked_monitor_updates_pending() == 0 {
+			handle_monitor_update_completion!(
+				$self,
+				$peer_state_lock,
+				$peer_state,
+				$per_peer_state_lock,
+				$chan
+			);
+		}
+		update_completed
 	}};
 }
 
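In `handle_new_monitor_update`, note that full persistence alone does not trigger completion: it is additionally gated on `$chan.blocked_monitor_updates_pending() == 0`. A small illustrative sketch of that gate, again with invented stand-in types:

	struct Chan {
		blocked_monitor_updates: Vec<u64>,
	}

	impl Chan {
		// Stand-in for the channel's blocked_monitor_updates_pending().
		fn blocked_monitor_updates_pending(&self) -> usize {
			self.blocked_monitor_updates.len()
		}
	}

	fn should_run_completion(all_updates_complete: bool, chan: &Chan) -> bool {
		all_updates_complete && chan.blocked_monitor_updates_pending() == 0
	}

	fn main() {
		let blocked = Chan { blocked_monitor_updates: vec![7] };
		let clear = Chan { blocked_monitor_updates: Vec::new() };
		// Even a fully persisted update chain waits while updates are blocked.
		assert!(!should_run_completion(true, &blocked));
		assert!(should_run_completion(true, &clear));
	}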