@@ -3339,8 +3339,22 @@ macro_rules! emit_initial_channel_ready_event {
 	};
 }
 
+/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel.
+///
+/// Generally, you should not add new direct calls to this; instead, rely on
+/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you.
+///
+/// Requires that `$chan.blocked_monitor_updates_pending() == 0` and that the in-flight monitor
+/// update set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+		#[cfg(debug_assertions)]
+		{
+			let in_flight_updates =
+				$peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
+			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
+		}
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let mut updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
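
The new `#[cfg(debug_assertions)]` block turns the doc comment's precondition into an executable check that costs nothing in release builds. A minimal standalone sketch of the same pattern, with `PeerState` and `handle_completion` as illustrative stand-ins rather than LDK API:

```rust
use std::collections::HashMap;

struct PeerState {
	// Stand-in for `in_flight_monitor_updates`: channel id -> pending update ids.
	in_flight_monitor_updates: HashMap<u64, Vec<u64>>,
}

fn handle_completion(peer_state: &PeerState, channel_id: u64, blocked_updates_pending: usize) {
	// Debug builds verify the caller upheld the documented precondition;
	// release builds compile this block out entirely.
	#[cfg(debug_assertions)]
	{
		let in_flight_updates = peer_state.in_flight_monitor_updates.get(&channel_id);
		assert!(in_flight_updates.map(|updates| updates.is_empty()).unwrap_or(true));
		assert_eq!(blocked_updates_pending, 0);
	}
	// ... the actual completion steps would run here ...
	let _ = (peer_state, channel_id, blocked_updates_pending);
}

fn main() {
	let peer_state = PeerState { in_flight_monitor_updates: HashMap::new() };
	handle_completion(&peer_state, 42, 0); // both assertions hold
}
```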
@@ -4258,19 +4272,7 @@ where
 			// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in
 			// `locked_close_channel` we can skip the locks here.
 			if shutdown_res.channel_funding_txo.is_some() {
-				let per_peer_state = self.per_peer_state.read().unwrap();
-				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
-					let mut peer_state = peer_state_mtx.lock().unwrap();
-					if peer_state.in_flight_monitor_updates.get(&shutdown_res.channel_id).map(|(_, updates)| updates.is_empty()).unwrap_or(true) {
-						let update_actions = peer_state.monitor_update_blocked_actions
-							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
-
-						mem::drop(peer_state);
-						mem::drop(per_peer_state);
-
-						self.handle_monitor_update_completion_actions(update_actions);
-					}
-				}
+				self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id);
 			}
 		}
 		let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
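
For context, the deleted block followed a pattern that `channel_monitor_updated` now owns: gather the blocked actions while holding the per-peer locks, then explicitly `mem::drop` both guards before running the actions, which may re-take those locks. A simplified, self-contained sketch of that pattern, using stand-in maps and a `String` action type:

```rust
use std::collections::HashMap;
use std::mem;
use std::sync::{Mutex, RwLock};

fn handle_completion_actions(actions: Vec<String>) {
	for action in actions {
		println!("running completion action: {action}");
	}
}

fn main() {
	// Stand-ins for `per_peer_state` and `monitor_update_blocked_actions`.
	let per_peer_state: RwLock<HashMap<u8, Mutex<HashMap<u64, Vec<String>>>>> = RwLock::new(
		HashMap::from([(1u8, Mutex::new(HashMap::from([(7u64, vec!["emit event".to_owned()])])))]),
	);

	let peers = per_peer_state.read().unwrap();
	if let Some(peer_mtx) = peers.get(&1) {
		let mut peer_state = peer_mtx.lock().unwrap();
		let update_actions = peer_state.remove(&7).unwrap_or(Vec::new());

		// Release both locks before running the actions, exactly as the
		// removed code did with its two mem::drop calls.
		mem::drop(peer_state);
		mem::drop(peers);

		handle_completion_actions(update_actions);
	}
}
```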
@@ -7147,25 +7149,7 @@ where
 					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
-					let per_peer_state = self.per_peer_state.read().unwrap();
-					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-						let peer_state = &mut *peer_state_lock;
-						if let Some(chan) = peer_state.channel_by_id
-							.get_mut(&channel_id)
-							.and_then(Channel::as_funded_mut)
-						{
-							if chan.blocked_monitor_updates_pending() == 0 {
-								handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
-							}
-						} else {
-							let update_actions = peer_state.monitor_update_blocked_actions
-								.remove(&channel_id).unwrap_or(Vec::new());
-							mem::drop(peer_state_lock);
-							mem::drop(per_peer_state);
-							self.handle_monitor_update_completion_actions(update_actions);
-						}
-					}
+					self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 				},
 			}
 		}
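
This arm was the fullest copy of the duplicated logic: if the channel is still live and nothing blocks it, run the completion macro; if the channel is already gone, just drain its queued actions. A hedged sketch of that dispatch over deliberately simplified types (none of these names are the real LDK API):

```rust
use std::collections::HashMap;

struct Manager {
	// channel id -> count of blocked monitor updates, for channels still live.
	funded_channels: HashMap<u64, u32>,
	// channel id -> completion actions queued while updates were in flight.
	monitor_update_blocked_actions: HashMap<u64, Vec<String>>,
}

impl Manager {
	fn monitor_updates_complete(&mut self, channel_id: u64) {
		if let Some(blocked) = self.funded_channels.get(&channel_id) {
			if *blocked == 0 {
				// Live, unblocked channel: run the full completion steps
				// (handle_monitor_update_completion! in the real code).
				println!("completing monitor update for live channel {channel_id}");
			}
		} else {
			// Channel already closed: only its queued actions remain.
			let actions =
				self.monitor_update_blocked_actions.remove(&channel_id).unwrap_or(Vec::new());
			for action in actions {
				println!("running queued action: {action}");
			}
		}
	}
}

fn main() {
	let mut mgr = Manager {
		funded_channels: HashMap::from([(7, 0)]),
		monitor_update_blocked_actions: HashMap::from([(9, vec!["free the HTLC".to_owned()])]),
	};
	mgr.monitor_updates_complete(7); // live-channel path
	mgr.monitor_updates_complete(9); // closed-channel path
}
```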
@@ -8641,7 +8625,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}
 
 	#[rustfmt::skip]
-	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
+	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
 
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8651,16 +8635,33 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		let remaining_in_flight =
 			if let Some((_, pending)) = peer_state.in_flight_monitor_updates.get_mut(channel_id) {
-				pending.retain(|upd| upd.update_id > highest_applied_update_id);
+				if let Some(highest_applied_update_id) = highest_applied_update_id {
+					pending.retain(|upd| upd.update_id > highest_applied_update_id);
+					log_trace!(
+						logger,
+						"ChannelMonitor updated to {highest_applied_update_id}. {} pending in-flight updates.",
+						pending.len()
+					);
+				} else if let Some(update) = pending.get(0) {
+					log_trace!(
+						logger,
+						"ChannelMonitor updated to {}. {} pending in-flight updates.",
+						update.update_id - 1,
+						pending.len()
+					);
+				} else {
+					log_trace!(
+						logger,
+						"ChannelMonitor updated. {} pending in-flight updates.",
+						pending.len()
+					);
+				}
 				pending.len()
 			} else { 0 };
 
-		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
-		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
-			highest_applied_update_id, remaining_in_flight);
-
 		if remaining_in_flight != 0 {
 			return;
 		}
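
With `highest_applied_update_id` now an `Option`, `Some(id)` prunes every in-flight update at or below the applied id, while `None` leaves the queue untouched and merely reports before the completion re-check. A standalone sketch of that pruning and logging logic (the `Update` struct is illustrative):

```rust
struct Update {
	update_id: u64,
}

fn remaining_in_flight(pending: &mut Vec<Update>, highest_applied_update_id: Option<u64>) -> usize {
	if let Some(highest) = highest_applied_update_id {
		// A monitor update was just persisted: drop everything it covered.
		pending.retain(|upd| upd.update_id > highest);
		println!("ChannelMonitor updated to {highest}. {} pending in-flight updates.", pending.len());
	} else if let Some(update) = pending.first() {
		// Nothing newly applied; the monitor sits just below the oldest pending id.
		println!(
			"ChannelMonitor updated to {}. {} pending in-flight updates.",
			update.update_id - 1,
			pending.len()
		);
	} else {
		println!("ChannelMonitor updated. {} pending in-flight updates.", pending.len());
	}
	pending.len()
}

fn main() {
	let mut pending = vec![Update { update_id: 3 }, Update { update_id: 4 }, Update { update_id: 5 }];
	assert_eq!(remaining_in_flight(&mut pending, Some(4)), 1); // only update 5 survives
	assert_eq!(remaining_in_flight(&mut pending, None), 1); // logs "updated to 4"
}
```

Completion only proceeds once this count reaches zero, which is why the `None` callers can safely share the same entry point as `MonitorEvent::Completed`.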
@@ -10891,7 +10892,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
 					self.channel_monitor_updated(
 						&channel_id,
-						monitor_update_id,
+						Some(monitor_update_id),
 						&counterparty_node_id,
 					);
 				},
@@ -13127,38 +13128,7 @@ where
 
 		#[cfg(splicing)]
 		for (counterparty_node_id, channel_id) in to_process_monitor_update_actions {
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state = &mut *peer_state_lock;
-				let has_in_flight_updates = peer_state
-					.in_flight_monitor_updates
-					.get(&channel_id)
-					.map(|in_flight_updates| !in_flight_updates.1.is_empty())
-					.unwrap_or(false);
-				if let Some(chan) = peer_state.channel_by_id
-					.get_mut(&channel_id)
-					.and_then(Channel::as_funded_mut)
-				{
-					if !has_in_flight_updates && chan.blocked_monitor_updates_pending() == 0 {
-						handle_monitor_update_completion!(
-							self,
-							peer_state_lock,
-							peer_state,
-							per_peer_state,
-							chan
-						);
-					}
-				} else {
-					let update_actions = peer_state
-						.monitor_update_blocked_actions
-						.remove(&channel_id)
-						.unwrap_or(Vec::new());
-					mem::drop(peer_state_lock);
-					mem::drop(per_peer_state);
-					self.handle_monitor_update_completion_actions(update_actions);
-				}
-			}
+			self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
 		}
 
 		if let Some(height) = height_opt {
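
One reason `to_process_monitor_update_actions` is collected before this loop: the helper takes the per-peer locks itself, so the caller must not hold them while iterating. A simplified sketch of that snapshot-then-call shape (types are stand-ins, not LDK's):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Manager {
	// counterparty id -> channels that need a completion re-check.
	peers: Mutex<HashMap<u8, Vec<u64>>>,
}

impl Manager {
	fn channel_monitor_updated(&self, channel_id: &u64, counterparty_node_id: &u8) {
		// Takes the lock internally, like the real helper takes per_peer_state.
		let _guard = self.peers.lock().unwrap();
		println!("re-checking channel {channel_id} with peer {counterparty_node_id}");
	}

	fn process(&self) {
		// Snapshot the (peer, channel) pairs and drop the lock before looping;
		// otherwise the helper's own lock() call could deadlock.
		let to_process: Vec<(u8, u64)> = {
			let peers = self.peers.lock().unwrap();
			peers.iter().flat_map(|(p, chans)| chans.iter().map(move |c| (*p, *c))).collect()
		};
		for (counterparty_node_id, channel_id) in to_process {
			self.channel_monitor_updated(&channel_id, &counterparty_node_id);
		}
	}
}

fn main() {
	let mgr = Manager { peers: Mutex::new(HashMap::from([(1, vec![7, 9])])) };
	mgr.process();
}
```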