Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
a90e9fe
Split up some tests that have many variants
valentinewallace Mar 17, 2026
8f12703
Remove unnecessary pending_monitor_events clone
valentinewallace Mar 19, 2026
f76f166
Add persistent_monitor_events flag to monitors/manager
valentinewallace Mar 17, 2026
da3632b
Add helper to push monitor events
valentinewallace Mar 18, 2026
2e84b26
Rename pending_monitor_events to _legacy
valentinewallace Mar 18, 2026
fd90e0c
Add chain::Watch ack_monitor_event API
valentinewallace Mar 16, 2026
cb4a804
Add monitor event ids
valentinewallace Mar 18, 2026
491f582
Ack monitor events immediately
valentinewallace Mar 16, 2026
76c142d
Support persistent monitor events
valentinewallace Mar 17, 2026
b1054ff
Track recent monitor updates in TestChainMonitor
valentinewallace Mar 30, 2026
2bfeea5
Persist user channel id in monitors
valentinewallace Mar 24, 2026
8aa3b86
Include user channel id in monitor event
valentinewallace Mar 24, 2026
9432f2b
Pass best block height to outbound_payments::claim_htlc
valentinewallace Apr 3, 2026
66afc41
Pass monitor event id to claim_funds_internal
valentinewallace Apr 3, 2026
e48e7b1
Stop hardcoding from_onchain in monitor ev claim_funds
valentinewallace Apr 3, 2026
e913ece
Add EventCompletionAction::AckMonitorEvent
valentinewallace Apr 3, 2026
a6f59a4
Persistent mon events for off-chain outbound claims
valentinewallace Apr 3, 2026
8c4df02
Filter claims from get_onchain_failed_htlcs return value
valentinewallace Apr 3, 2026
591144b
Persistent monitor events for onchain outbound claims
valentinewallace Apr 3, 2026
a4c3489
claim_funds api: Event -> ForwardEventContents
valentinewallace Apr 4, 2026
09d5aea
Thread monitor event id to claim_mpp_part
valentinewallace Apr 4, 2026
80ea612
Pass monitor event id when claiming in Channel
valentinewallace Apr 4, 2026
e447acd
Cache monitor event id in holding cell claims
valentinewallace Apr 4, 2026
46275f7
Cache monitor event id with inbound removed htlcs
valentinewallace Apr 5, 2026
f73aa93
Ack monitor events post-initial CommitmentSecret
valentinewallace Apr 4, 2026
0ece3d6
Ack monitor events if CommitmentSecret was blocked
valentinewallace Apr 5, 2026
8fdad85
Update monitor event id on duplicate inbound claim
valentinewallace Apr 5, 2026
86a23fe
Abstract params to make claim mon completion action
valentinewallace Apr 5, 2026
13c5908
Add MonitorUpdateCompletionAction::EmitForwardEvent
valentinewallace Apr 5, 2026
c980152
HTLCUpdate::htlc_value_satoshis -> msats
valentinewallace Apr 6, 2026
bd2d404
Persistent monitor events for HTLC forward claims
valentinewallace Apr 7, 2026
8a860dc
Add skimmed fee to monitor updates
valentinewallace Mar 24, 2026
dd56009
MonitorEvents: HTLC failure reason and skimmed fee
valentinewallace Apr 7, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion fuzz/src/chanmon_consistency.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ use lightning::chain;
use lightning::chain::chaininterface::{
BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType,
};
use lightning::chain::chainmonitor::MonitorEventSource;
use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::{
Expand Down Expand Up @@ -364,9 +365,13 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {

fn release_pending_monitor_events(
&self,
) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
) -> Vec<(OutPoint, ChannelId, Vec<(u64, MonitorEvent)>, PublicKey)> {
return self.chain_monitor.release_pending_monitor_events();
}

// Delegates acknowledgment straight to the wrapped `ChainMonitor`; the fuzz
// harness adds no bookkeeping of its own here.
fn ack_monitor_event(&self, source: MonitorEventSource) {
self.chain_monitor.ack_monitor_event(source);
}
}

struct KeyProvider {
Expand Down
79 changes: 39 additions & 40 deletions lightning/src/chain/chainmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,21 @@ use core::iter::Cycle;
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};

/// Identifies the source of a [`MonitorEvent`] for acknowledgment via
/// [`chain::Watch::ack_monitor_event`] once the event has been processed.
///
/// Pairs the monitor-assigned `event_id` with the `channel_id` of the monitor
/// that emitted it, so the ack can be routed back to the owning
/// [`ChannelMonitor`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MonitorEventSource {
/// The event ID assigned by the [`ChannelMonitor`].
pub event_id: u64,
/// The channel from which the [`MonitorEvent`] originated.
pub channel_id: ChannelId,
}

// NOTE(review): the TLV type numbers (1, 3) define this struct's serialized
// format — keep them stable across versions and give any future field a fresh,
// unused type number.
impl_writeable_tlv_based!(MonitorEventSource, {
(1, event_id, required),
(3, channel_id, required),
});

/// A pending operation queued for later execution when `ChainMonitor` is in deferred mode.
enum PendingMonitorOp<ChannelSigner: EcdsaChannelSigner> {
/// A new monitor to insert and persist.
Expand Down Expand Up @@ -366,9 +381,6 @@ pub struct ChainMonitor<
fee_estimator: F,
persister: P,
_entropy_source: ES,
/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
/// from the user and not from a [`ChannelMonitor`].
pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,

Expand Down Expand Up @@ -436,7 +448,6 @@ where
logger,
fee_estimator: feeest,
_entropy_source,
pending_monitor_events: Mutex::new(Vec::new()),
highest_chain_height: AtomicUsize::new(0),
event_notifier: Arc::clone(&event_notifier),
persister: AsyncPersister { persister, event_notifier },
Expand Down Expand Up @@ -657,7 +668,6 @@ where
fee_estimator: feeest,
persister,
_entropy_source,
pending_monitor_events: Mutex::new(Vec::new()),
highest_chain_height: AtomicUsize::new(0),
event_notifier: Arc::new(Notifier::new()),
pending_send_only_events: Mutex::new(Vec::new()),
Expand Down Expand Up @@ -802,16 +812,11 @@ where
return Ok(());
}
let funding_txo = monitor_data.monitor.get_funding_txo();
self.pending_monitor_events.lock().unwrap().push((
monitor_data.monitor.push_monitor_event(MonitorEvent::Completed {
funding_txo,
channel_id,
vec![MonitorEvent::Completed {
funding_txo,
channel_id,
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
}],
monitor_data.monitor.get_counterparty_node_id(),
));
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
});

self.event_notifier.notify();
Ok(())
Expand All @@ -824,14 +829,11 @@ where
pub fn force_channel_monitor_updated(&self, channel_id: ChannelId, monitor_update_id: u64) {
let monitors = self.monitors.read().unwrap();
let monitor = &monitors.get(&channel_id).unwrap().monitor;
let counterparty_node_id = monitor.get_counterparty_node_id();
let funding_txo = monitor.get_funding_txo();
self.pending_monitor_events.lock().unwrap().push((
funding_txo,
monitor.push_monitor_event(MonitorEvent::Completed {
funding_txo: monitor.get_funding_txo(),
channel_id,
vec![MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id }],
counterparty_node_id,
));
monitor_update_id,
});
self.event_notifier.notify();
}

Expand Down Expand Up @@ -1266,21 +1268,13 @@ where
// The channel is post-close (funding spend seen, lockdown, or
// holder tx signed). Return InProgress so ChannelManager freezes
// the channel until the force-close MonitorEvents are processed.
// Push a Completed event into pending_monitor_events so it gets
// picked up after the per-monitor events in the next
// release_pending_monitor_events call.
let funding_txo = monitor.get_funding_txo();
let channel_id = monitor.channel_id();
self.pending_monitor_events.lock().unwrap().push((
funding_txo,
channel_id,
vec![MonitorEvent::Completed {
funding_txo,
channel_id,
monitor_update_id: monitor.get_latest_update_id(),
}],
monitor.get_counterparty_node_id(),
));
// Push a Completed event into the monitor so it gets picked up
// in the next release_pending_monitor_events call.
monitor.push_monitor_event(MonitorEvent::Completed {
funding_txo: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
monitor_update_id: monitor.get_latest_update_id(),
});
log_debug!(
logger,
"Deferring completion of ChannelMonitorUpdate id {:?} (channel is post-close)",
Expand Down Expand Up @@ -1645,7 +1639,7 @@ where

fn release_pending_monitor_events(
&self,
) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
) -> Vec<(OutPoint, ChannelId, Vec<(u64, MonitorEvent)>, PublicKey)> {
for (channel_id, update_id) in self.persister.get_and_clear_completed_updates() {
let _ = self.channel_monitor_updated(channel_id, update_id);
}
Expand All @@ -1665,12 +1659,17 @@ where
));
}
}
// Drain pending_monitor_events (which includes deferred post-close
// completions) after per-monitor events so that force-close
// MonitorEvents are processed by ChannelManager first.
pending_monitor_events.extend(self.pending_monitor_events.lock().unwrap().split_off(0));
pending_monitor_events
}

/// Routes an acknowledgment for a previously-released [`MonitorEvent`] to the
/// [`ChannelMonitor`] that produced it, looked up by the source's channel ID.
fn ack_monitor_event(&self, source: MonitorEventSource) {
	let monitor_map = self.monitors.read().unwrap();
	match monitor_map.get(&source.channel_id) {
		Some(monitor_state) => {
			monitor_state.monitor.ack_monitor_event(source.event_id);
		},
		None => {
			// An ack for an unknown channel indicates a bookkeeping bug upstream.
			debug_assert!(false, "Ack'd monitor events should always have a corresponding monitor");
		},
	}
}
}

impl<
Expand Down
Loading
Loading