From e45bfeda23705287eb5b55aa0effedeb7d5451ff Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 7 Aug 2025 20:46:24 +0000 Subject: [PATCH 1/3] Add a method to avoid re-persisting monitors during startup Prior to LDK 0.1, in rare cases we could replay payment claims to `ChannelMonitor`s on startup, which we then expected to be persisted prior to normal node operation. This required re-persisting `ChannelMonitor`s after deserializing the `ChannelManager`, delaying startup in some cases substantially. In 0.1 we fixed this, moving claim replays to the background to run after the `ChannelManager` starts operating (and only updating/persisting changes to the `ChannelMonitor`s which need it). However, we didn't actually enable this meaningfully in our API - nearly all users use our `ChainMonitor` and the only way to get a channel into `ChainMonitor` is through the normal flow which expects to persist. Here we add a simple method to load `ChannelMonitor`s into the `ChainMonitor` without persisting them. --- lightning/src/chain/chainmonitor.rs | 37 +++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 5 +++ lightning/src/ln/functional_test_utils.rs | 5 +-- lightning/src/util/test_utils.rs | 27 +++++++++++++++++ 4 files changed, 70 insertions(+), 4 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 0de372813b3..7579a57aa3c 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -825,6 +825,43 @@ where self.pending_send_only_events.lock().unwrap().push(send_peer_storage_event) } + + /// Loads a [`ChannelMonitor`] which already exists on disk after startup. + /// + /// Using this over [`chain::Watch::watch_channel`] avoids re-persisting a [`ChannelMonitor`] + /// that hasn't changed, slowing down startup. 
+ /// + /// Note that this method *can* be used if additional blocks were replayed against the + /// [`ChannelMonitor`], and in general can only *not* be used if a [`ChannelMonitorUpdate`] was + /// replayed against the [`ChannelMonitor`] which needs to be psersisted (i.e. the state has + /// changed due to a [`ChannelMonitorUpdate`] such that it may be different after another + /// restart). + /// + /// This method is only safe for [`ChannelMonitor`]s which have been loaded (in conjunction + /// with a `ChannelManager`) at least once by LDK 0.1 or later. + pub fn load_post_0_1_existing_monitor( + &self, channel_id: ChannelId, monitor: ChannelMonitor, + ) -> Result<(), ()> { + let logger = WithChannelMonitor::from(&self.logger, &monitor, None); + let mut monitors = self.monitors.write().unwrap(); + let entry = match monitors.entry(channel_id) { + hash_map::Entry::Occupied(_) => { + log_error!(logger, "Failed to add new channel data: channel monitor for given channel ID is already present"); + return Err(()); + }, + hash_map::Entry::Vacant(e) => e, + }; + log_trace!( + logger, + "Loaded existing ChannelMonitor for channel {}", + log_funding_info!(monitor) + ); + if let Some(ref chain_source) = self.chain_source { + monitor.load_outputs_to_watch(chain_source, &self.logger); + } + entry.insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(Vec::new()) }); + Ok(()) + } } impl< diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1fd99f89451..92e0fdd65b4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -15396,9 +15396,13 @@ impl Readable for VecDeque<(Event, Option)> { /// This is important if you have replayed a nontrivial number of blocks in step (4), allowing /// you to avoid having to replay the same blocks if you shut down quickly after startup. It is /// otherwise not required. 
+/// /// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you /// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in /// the next step. +/// +/// If you wish to avoid this for performance reasons, use +/// [`ChainMonitor::load_post_0_1_existing_monitor`]. /// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a /// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`]. /// @@ -15413,6 +15417,7 @@ impl Readable for VecDeque<(Event, Option)> { /// which you've already broadcasted the transaction. /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor +/// [`ChainMonitor::load_post_0_1_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_post_0_1_existing_monitor pub struct ChannelManagerReadArgs< 'a, M: Deref, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 00e883e8561..fdbd7f8cc5a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1323,10 +1323,7 @@ pub fn _reload_node<'a, 'b, 'c>( for monitor in monitors_read.drain(..) 
{ let channel_id = monitor.channel_id(); - assert_eq!( - node.chain_monitor.watch_channel(channel_id, monitor), - Ok(ChannelMonitorUpdateStatus::Completed) - ); + assert_eq!(node.chain_monitor.load_post_0_1_existing_monitor(channel_id, monitor), Ok(())); check_added_monitors!(node, 1); } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index e5388b76049..918e445afab 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -515,6 +515,33 @@ impl<'a> TestChainMonitor<'a> { self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone(); self.chain_monitor.channel_monitor_updated(*channel_id, latest_update).unwrap(); } + + pub fn load_post_0_1_existing_monitor( + &self, channel_id: ChannelId, monitor: ChannelMonitor, + ) -> Result<(), ()> { + #[cfg(feature = "std")] + if let Some(blocker) = &*self.write_blocker.lock().unwrap() { + blocker.recv().unwrap(); + } + + // At every point where we get a monitor update, we should be able to send a useful monitor + // to a watchtower and disk... 
+ let mut w = TestVecWriter(Vec::new()); + monitor.write(&mut w).unwrap(); + let new_monitor = <(BlockHash, ChannelMonitor)>::read( + &mut io::Cursor::new(&w.0), + (self.keys_manager, self.keys_manager), + ) + .unwrap() + .1; + assert!(new_monitor == monitor); + self.latest_monitor_update_id + .lock() + .unwrap() + .insert(channel_id, (monitor.get_latest_update_id(), monitor.get_latest_update_id())); + self.added_monitors.lock().unwrap().push((channel_id, monitor)); + self.chain_monitor.load_post_0_1_existing_monitor(channel_id, new_monitor) + } } impl<'a> chain::Watch for TestChainMonitor<'a> { fn watch_channel( From b429f30ca455fa83d42b59d118c2dc6fe2277ef3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 8 Aug 2025 13:45:59 +0000 Subject: [PATCH 2/3] f auto-detect if we need persistence --- lightning/src/chain/chainmonitor.rs | 20 +++++++++++++++----- lightning/src/chain/channelmonitor.rs | 14 ++++++++++++++ lightning/src/ln/channelmanager.rs | 4 ++-- lightning/src/ln/functional_test_utils.rs | 5 ++++- lightning/src/util/test_utils.rs | 10 ++++++---- 5 files changed, 41 insertions(+), 12 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 7579a57aa3c..80d8f40d1bb 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -837,11 +837,20 @@ where /// changed due to a [`ChannelMonitorUpdate`] such that it may be different after another /// restart). /// - /// This method is only safe for [`ChannelMonitor`]s which have been loaded (in conjunction - /// with a `ChannelManager`) at least once by LDK 0.1 or later. - pub fn load_post_0_1_existing_monitor( + /// For [`ChannelMonitor`]s which were last serialized by an LDK version prior to 0.1 this will + /// fall back to calling [`chain::Watch::watch_channel`] and persisting the [`ChannelMonitor`]. + /// See the release notes for LDK 0.1 for more information on this requirement. 
+ /// + /// [`ChannelMonitor`]s which do not need to be persisted (i.e. were last written by LDK 0.1 or + /// later) will be loaded without persistence and this method will return + /// [`ChannelMonitorUpdateStatus::Completed`]. + pub fn load_existing_monitor( &self, channel_id: ChannelId, monitor: ChannelMonitor, - ) -> Result<(), ()> { + ) -> Result { + if !monitor.written_by_0_1_or_later() { + return chain::Watch::watch_channel(self, channel_id, monitor); + } + let logger = WithChannelMonitor::from(&self.logger, &monitor, None); let mut monitors = self.monitors.write().unwrap(); let entry = match monitors.entry(channel_id) { @@ -860,7 +869,8 @@ where monitor.load_outputs_to_watch(chain_source, &self.logger); } entry.insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(Vec::new()) }); - Ok(()) + + Ok(ChannelMonitorUpdateStatus::Completed) } } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c7011af43a9..e22b04570ce 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1300,6 +1300,11 @@ pub(crate) struct ChannelMonitorImpl { // found at `Self::funding`. We don't use the term "renegotiated", as the currently locked // `FundingScope` could be one that was renegotiated. alternative_funding_confirmed: Option<(Txid, u32)>, + + /// [`ChannelMonitor`]s written by LDK prior to 0.1 need to be re-persisted after startup. To + /// make deciding whether to do so simple, here we track whether this monitor was last written + /// prior to 0.1. 
+ written_by_0_1_or_later: bool, } // Macro helper to access holder commitment HTLC data (including both non-dust and dust) while @@ -1803,6 +1808,8 @@ impl ChannelMonitor { prev_holder_htlc_data: None, alternative_funding_confirmed: None, + + written_by_0_1_or_later: true, }) } @@ -1936,6 +1943,10 @@ impl ChannelMonitor { self.inner.lock().unwrap().get_funding_txo() } + pub(crate) fn written_by_0_1_or_later(&self) -> bool { + self.inner.lock().unwrap().written_by_0_1_or_later + } + /// Gets the funding script of the channel this ChannelMonitor is monitoring for. pub fn get_funding_script(&self) -> ScriptBuf { self.inner.lock().unwrap().get_funding_script() @@ -6080,6 +6091,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (32, pending_funding, optional_vec), (34, alternative_funding_confirmed, option), }); + let written_by_0_1_or_later = payment_preimages_with_info.is_some(); if let Some(payment_preimages_with_info) = payment_preimages_with_info { if payment_preimages_with_info.len() != payment_preimages.len() { return Err(DecodeError::InvalidValue); @@ -6250,6 +6262,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP prev_holder_htlc_data, alternative_funding_confirmed, + + written_by_0_1_or_later, }))) } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 92e0fdd65b4..90790479976 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -15402,7 +15402,7 @@ impl Readable for VecDeque<(Event, Option)> { /// the next step. /// /// If you wish to avoid this for performance reasons, use -/// [`ChainMonitor::load_post_0_1_existing_monitor`]. +/// [`ChainMonitor::load_existing_monitor`]. /// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a /// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`]. 
/// @@ -15417,7 +15417,7 @@ impl Readable for VecDeque<(Event, Option)> { /// which you've already broadcasted the transaction. /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor -/// [`ChainMonitor::load_post_0_1_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_post_0_1_existing_monitor +/// [`ChainMonitor::load_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_existing_monitor pub struct ChannelManagerReadArgs< 'a, M: Deref, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index fdbd7f8cc5a..955209ea21f 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1323,7 +1323,10 @@ pub fn _reload_node<'a, 'b, 'c>( for monitor in monitors_read.drain(..) { let channel_id = monitor.channel_id(); - assert_eq!(node.chain_monitor.load_post_0_1_existing_monitor(channel_id, monitor), Ok(())); + assert_eq!( + node.chain_monitor.load_existing_monitor(channel_id, monitor), + Ok(ChannelMonitorUpdateStatus::Completed), + ); check_added_monitors!(node, 1); } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 918e445afab..ca4f8a4f1be 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -516,9 +516,9 @@ impl<'a> TestChainMonitor<'a> { self.chain_monitor.channel_monitor_updated(*channel_id, latest_update).unwrap(); } - pub fn load_post_0_1_existing_monitor( + pub fn load_existing_monitor( &self, channel_id: ChannelId, monitor: ChannelMonitor, - ) -> Result<(), ()> { + ) -> Result { #[cfg(feature = "std")] if let Some(blocker) = &*self.write_blocker.lock().unwrap() { blocker.recv().unwrap(); @@ -534,13 +534,15 @@ impl<'a> TestChainMonitor<'a> { ) .unwrap() .1; - assert!(new_monitor == monitor); + // Note that a ChannelMonitor might not round-trip exactly here as we have tests that were + // serialized prior to LDK 0.1 and re-serializing them will flip the "written 
after LDK + // 0.1" flag. self.latest_monitor_update_id .lock() .unwrap() .insert(channel_id, (monitor.get_latest_update_id(), monitor.get_latest_update_id())); self.added_monitors.lock().unwrap().push((channel_id, monitor)); - self.chain_monitor.load_post_0_1_existing_monitor(channel_id, new_monitor) + self.chain_monitor.load_existing_monitor(channel_id, new_monitor) } } impl<'a> chain::Watch for TestChainMonitor<'a> { From 76e1d55be438b3224bbaa9fb165a98dd3d2f0102 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 8 Aug 2025 18:31:12 +0000 Subject: [PATCH 3/3] f sp --- lightning/src/chain/chainmonitor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 80d8f40d1bb..47d4d844434 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -833,7 +833,7 @@ where /// /// Note that this method *can* be used if additional blocks were replayed against the /// [`ChannelMonitor`], and in general can only *not* be used if a [`ChannelMonitorUpdate`] was - /// replayed against the [`ChannelMonitor`] which needs to be psersisted (i.e. the state has + /// replayed against the [`ChannelMonitor`] which needs to be persisted (i.e. the state has /// changed due to a [`ChannelMonitorUpdate`] such that it may be different after another /// restart). ///