From 01b1738aa4ca96edecec2ab606920f0170870ab6 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 16 Jan 2025 14:21:59 -0800 Subject: [PATCH 1/3] Use channel ID over funding outpoint to track monitors in ChainMonitor Once dual funding/splicing is supported, channels will no longer maintain their original funding outpoint. Ideally, we identify `ChannelMonitor`s with a stable identifier, such as the channel ID, which is fixed throughout the channel's lifetime. This commit replaces the use of funding outpoints as the key to the monitor index with the channel ID. Note that this is strictly done to the in-memory state; it does not change how we track monitors within their persisted state, they are still keyed by their funding outpoint there. Addressing that is left for follow-up work. --- lightning/src/chain/chainmonitor.rs | 160 +++++++++-------- lightning/src/chain/mod.rs | 4 +- lightning/src/ln/async_signer_tests.rs | 12 +- lightning/src/ln/chanmon_update_fail_tests.rs | 124 +++++++------ lightning/src/ln/channelmanager.rs | 14 +- lightning/src/ln/functional_test_utils.rs | 46 ++--- lightning/src/ln/functional_tests.rs | 61 ++++--- lightning/src/ln/monitor_tests.rs | 164 +++++++++--------- lightning/src/ln/priv_short_conf_tests.rs | 4 +- lightning/src/ln/reload_tests.rs | 31 ++-- lightning/src/ln/shutdown_tests.rs | 4 +- lightning/src/util/test_utils.rs | 34 ++-- 12 files changed, 325 insertions(+), 333 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index f1b9f729cb3..efedb58f2d7 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -43,7 +43,6 @@ use crate::prelude::*; use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard}; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; -use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; /// `Persist` defines behavior for persisting channel monitors: this could mean @@ -202,14 +201,14 @@ impl MonitorHolder { /// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is /// released. pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> { - lock: RwLockReadGuard<'a, HashMap>>, - funding_txo: OutPoint, + lock: RwLockReadGuard<'a, HashMap>>, + channel_id: ChannelId, } impl Deref for LockedChannelMonitor<'_, ChannelSigner> { type Target = ChannelMonitor; fn deref(&self) -> &ChannelMonitor { - &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor + &self.lock.get(&self.channel_id).expect("Checked at construction").monitor } } @@ -236,7 +235,7 @@ pub struct ChainMonitor, { - monitors: RwLock>>, + monitors: RwLock>>, chain_source: Option, broadcaster: T, logger: L, @@ -276,12 +275,12 @@ where C::Target: chain::Filter, FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; - let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); - let channel_count = funding_outpoints.len(); - for funding_outpoint in funding_outpoints.iter() { + let channel_ids = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); + let channel_count = channel_ids.len(); + for channel_id in channel_ids.iter() { let monitor_lock = self.monitors.read().unwrap(); - if let Some(monitor_state) = monitor_lock.get(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { + if let Some(monitor_state) = monitor_lock.get(channel_id) { + if self.update_monitor_with_chain_data(header, best_height, txdata, &process, channel_id, &monitor_state, channel_count).is_err() { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. core::mem::drop(monitor_lock); @@ -294,9 +293,9 @@ where C::Target: chain::Filter, // do some followup cleanup if any funding outpoints were added in between iterations let monitor_states = self.monitors.write().unwrap(); - for (funding_outpoint, monitor_state) in monitor_states.iter() { - if !funding_outpoints.contains(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { + for (channel_id, monitor_state) in monitor_states.iter() { + if !channel_ids.contains(channel_id) { + if self.update_monitor_with_chain_data(header, best_height, txdata, &process, channel_id, &monitor_state, channel_count).is_err() { log_error!(self.logger, "{}", err_str); panic!("{}", err_str); } @@ -315,7 +314,7 @@ where C::Target: chain::Filter, } fn update_monitor_with_chain_data( - &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, + &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, channel_id: &ChannelId, monitor_state: &MonitorHolder, channel_count: usize, ) -> Result<(), ()> where FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let monitor = &monitor_state.monitor; @@ -323,11 +322,10 @@ where C::Target: chain::Filter, let mut txn_outputs = process(monitor, txdata); - let get_partition_key = |funding_outpoint: &OutPoint| { - let funding_txid_hash = funding_outpoint.txid.to_raw_hash(); - let funding_txid_hash_bytes = funding_txid_hash.as_byte_array(); - let funding_txid_u32 = u32::from_be_bytes([funding_txid_hash_bytes[0], funding_txid_hash_bytes[1], funding_txid_hash_bytes[2], funding_txid_hash_bytes[3]]); - funding_txid_u32.wrapping_add(best_height.unwrap_or_default()) + let get_partition_key = |channel_id: &ChannelId| { + let channel_id_bytes = channel_id.0; + let channel_id_u32 = u32::from_be_bytes([channel_id_bytes[0], channel_id_bytes[1], channel_id_bytes[2], channel_id_bytes[3]]); + channel_id_u32.wrapping_add(best_height.unwrap_or_default()) }; let partition_factor = if channel_count < 15 { @@ -337,14 +335,15 @@ where C::Target: chain::Filter, }; let has_pending_claims = monitor_state.monitor.has_pending_claims(); - if has_pending_claims || get_partition_key(funding_outpoint) % partition_factor == 0 { + if has_pending_claims || get_partition_key(channel_id) % partition_factor == 0 { log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor)); // Even though we don't track monitor 
updates from chain-sync as pending, we still want // updates per-channel to be well-ordered so that users don't see a // `ChannelMonitorUpdate` after a channel persist for a channel with the same // `latest_update_id`. let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) { + let funding_txo = monitor.get_funding_txo().0; + match self.persister.update_persisted_channel(funding_txo, None, monitor) { ChannelMonitorUpdateStatus::Completed => log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data", log_funding_info!(monitor) @@ -410,9 +409,9 @@ where C::Target: chain::Filter, pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec { let mut ret = Vec::new(); let monitor_states = self.monitors.read().unwrap(); - for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| { + for (_, monitor_state) in monitor_states.iter().filter(|(channel_id, _)| { for chan in ignored_channels { - if chan.funding_txo.as_ref() == Some(funding_outpoint) { + if chan.channel_id == **channel_id { return false; } } @@ -428,10 +427,10 @@ where C::Target: chain::Filter, /// /// Note that the result holds a mutex over our monitor set, and should not be held /// indefinitely. - pub fn get_monitor(&self, funding_txo: OutPoint) -> Result, ()> { + pub fn get_monitor(&self, channel_id: ChannelId) -> Result, ()> { let lock = self.monitors.read().unwrap(); - if lock.get(&funding_txo).is_some() { - Ok(LockedChannelMonitor { lock, funding_txo }) + if lock.get(&channel_id).is_some() { + Ok(LockedChannelMonitor { lock, channel_id }) } else { Err(()) } @@ -441,39 +440,36 @@ where C::Target: chain::Filter, /// /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always /// monitoring for on-chain state resolutions. - pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> { - self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| { - let channel_id = monitor_holder.monitor.channel_id(); - (*outpoint, channel_id) - }).collect() + pub fn list_monitors(&self) -> Vec { + self.monitors.read().unwrap().iter().map(|(channel_id, _)| *channel_id).collect() } #[cfg(not(c_bindings))] - /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored). + /// Lists the pending updates for each [`ChannelMonitor`] (by `ChannelId` being monitored). /// Each `Vec` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. - pub fn list_pending_monitor_updates(&self) -> HashMap> { - hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| { - (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone()) + pub fn list_pending_monitor_updates(&self) -> HashMap> { + hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(channel_id, holder)| { + (*channel_id, holder.pending_monitor_updates.lock().unwrap().clone()) })) } #[cfg(c_bindings)] - /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored). + /// Lists the pending updates for each [`ChannelMonitor`] (by `ChannelId` being monitored). 
/// Each `Vec` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. - pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec)> { - self.monitors.read().unwrap().iter().map(|(outpoint, holder)| { - (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone()) + pub fn list_pending_monitor_updates(&self) -> Vec<(ChannelId, Vec)> { + self.monitors.read().unwrap().iter().map(|(channel_id, holder)| { + (*channel_id, holder.pending_monitor_updates.lock().unwrap().clone()) }).collect() } #[cfg(test)] - pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor { - self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor + pub fn remove_monitor(&self, channel_id: &ChannelId) -> ChannelMonitor { + self.monitors.write().unwrap().remove(channel_id).unwrap().monitor } /// Indicates the persistence of a [`ChannelMonitor`] has completed after @@ -496,10 +492,10 @@ where C::Target: chain::Filter, /// /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently /// registered [`ChannelMonitor`]s. - pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> { + pub fn channel_monitor_updated(&self, channel_id: ChannelId, completed_update_id: u64) -> Result<(), APIError> { let monitors = self.monitors.read().unwrap(); - let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else { - return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) }); + let monitor_data = if let Some(mon) = monitors.get(&channel_id) { mon } else { + return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching channel ID {:?} found", channel_id) }); }; let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap(); pending_monitor_updates.retain(|update_id| *update_id != completed_update_id); @@ -507,9 +503,9 @@ where C::Target: chain::Filter, // Note that we only check for pending non-chainsync monitor updates and we don't track monitor // updates resulting from chainsync in `pending_monitor_updates`. let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates); - log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}", + log_debug!(self.logger, "Completed off-chain monitor update {} for channel with channel ID {:?}, {}", completed_update_id, - funding_txo, + channel_id, if monitor_is_pending_updates { "still have pending off-chain updates" } else { @@ -520,9 +516,10 @@ where C::Target: chain::Filter, // Completed event. return Ok(()); } - let channel_id = monitor_data.monitor.channel_id(); + let funding_txo = monitor_data.monitor.get_funding_txo().0; self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { - funding_txo, channel_id, + funding_txo, + channel_id, monitor_update_id: monitor_data.monitor.get_latest_update_id(), }], monitor_data.monitor.get_counterparty_node_id())); @@ -534,13 +531,11 @@ where C::Target: chain::Filter, /// chain::Watch API wherein we mark a monitor fully-updated by just calling /// channel_monitor_updated once with the highest ID. 
#[cfg(any(test, fuzzing))] - pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) { + pub fn force_channel_monitor_updated(&self, channel_id: ChannelId, monitor_update_id: u64) { let monitors = self.monitors.read().unwrap(); - let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) { - (m.monitor.get_counterparty_node_id(), m.monitor.channel_id()) - } else { - (None, ChannelId::v1_from_funding_outpoint(funding_txo)) - }; + let monitor = &monitors.get(&channel_id).unwrap().monitor; + let counterparty_node_id = monitor.get_counterparty_node_id(); + let funding_txo = monitor.get_funding_txo().0; self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { funding_txo, channel_id, @@ -570,10 +565,10 @@ where C::Target: chain::Filter, // Sadly we can't hold the monitors read lock through an async call. Thus we have to do a // crazy dance to process a monitor's events then only remove them once we've done so. let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::>(); - for funding_txo in mons_to_process { + for channel_id in mons_to_process { let mut ev; match super::channelmonitor::process_events_body!( - self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), self.logger, ev, handler(ev).await) { + self.monitors.read().unwrap().get(&channel_id).map(|m| &m.monitor), self.logger, ev, handler(ev).await) { Ok(()) => {}, Err(ReplayEvent ()) => { self.event_notifier.notify(); @@ -612,10 +607,10 @@ where C::Target: chain::Filter, /// signature generation failure. /// /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor. - pub fn signer_unblocked(&self, monitor_opt: Option) { + pub fn signer_unblocked(&self, monitor_opt: Option) { let monitors = self.monitors.read().unwrap(); - if let Some(funding_txo) = monitor_opt { - if let Some(monitor_holder) = monitors.get(&funding_txo) { + if let Some(channel_id) = monitor_opt { + if let Some(monitor_holder) = monitors.get(&channel_id) { monitor_holder.monitor.signer_unblocked( &*self.broadcaster, &*self.fee_estimator, &self.logger ) @@ -640,27 +635,27 @@ where C::Target: chain::Filter, /// data could be moved to an archive location or removed entirely. 
pub fn archive_fully_resolved_channel_monitors(&self) { let mut have_monitors_to_prune = false; - for (funding_txo, monitor_holder) in self.monitors.read().unwrap().iter() { + for (_, monitor_holder) in self.monitors.read().unwrap().iter() { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); let (is_fully_resolved, needs_persistence) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { have_monitors_to_prune = true; } if needs_persistence { - self.persister.update_persisted_channel(*funding_txo, None, &monitor_holder.monitor); + self.persister.update_persisted_channel(monitor_holder.monitor.get_funding_txo().0, None, &monitor_holder.monitor); } } if have_monitors_to_prune { let mut monitors = self.monitors.write().unwrap(); - monitors.retain(|funding_txo, monitor_holder| { + monitors.retain(|channel_id, monitor_holder| { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); let (is_fully_resolved, _) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { log_info!(logger, - "Archiving fully resolved ChannelMonitor for funding txo {}", - funding_txo + "Archiving fully resolved ChannelMonitor for channel ID {}", + channel_id ); - self.persister.archive_persisted_channel(*funding_txo); + self.persister.archive_persisted_channel(monitor_holder.monitor.get_funding_txo().0); false } else { true @@ -761,12 +756,12 @@ where C::Target: chain::Filter, L::Target: Logger, P::Target: Persist, { - fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor) -> Result { + fn watch_channel(&self, channel_id: ChannelId, monitor: ChannelMonitor) -> Result { let logger = WithChannelMonitor::from(&self.logger, &monitor, None); let mut monitors = self.monitors.write().unwrap(); - let entry = match monitors.entry(funding_outpoint) { + let entry = match monitors.entry(channel_id) { hash_map::Entry::Occupied(_) => { - log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present"); + log_error!(logger, "Failed to add new channel data: channel monitor for given channel ID is already present"); return Err(()); }, hash_map::Entry::Vacant(e) => e, @@ -774,7 +769,7 @@ where C::Target: chain::Filter, log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); let update_id = monitor.get_latest_update_id(); let mut pending_monitor_updates = Vec::new(); - let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor); + let persist_res = self.persister.persist_new_channel(monitor.get_funding_txo().0, &monitor); match persist_res { ChannelMonitorUpdateStatus::InProgress => { log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); @@ -799,13 +794,13 @@ where C::Target: chain::Filter, Ok(persist_res) } - fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { + fn update_channel(&self, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those // versions are V1-established. For 0.0.121+ the `channel_id` fields is always `Some`. 
- let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); + let channel_id = update.channel_id.unwrap_or(channel_id); // Update the monitor that watches the channel referred to by the given outpoint. let monitors = self.monitors.read().unwrap(); - match monitors.get(&funding_txo) { + match monitors.get(&channel_id) { None => { let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id), None); log_error!(logger, "Failed to update channel monitor: no such monitor registered"); @@ -830,6 +825,7 @@ where C::Target: chain::Filter, let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger); let update_id = update.update_id; + let funding_txo = monitor.get_funding_txo().0; let persist_res = if update_res.is_err() { // Even if updating the monitor returns an error, the monitor's state will // still be changed. Therefore, we should persist the updated monitor despite the error. @@ -882,10 +878,10 @@ where C::Target: chain::Filter, for monitor_state in self.monitors.read().unwrap().values() { let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events(); if monitor_events.len() > 0 { - let monitor_outpoint = monitor_state.monitor.get_funding_txo().0; + let monitor_funding_txo = monitor_state.monitor.get_funding_txo().0; let monitor_channel_id = monitor_state.monitor.channel_id(); let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id(); - pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id)); + pending_monitor_events.push((monitor_funding_txo, monitor_channel_id, monitor_events, counterparty_node_id)); } } pending_monitor_events @@ -946,7 +942,7 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Route two payments to be claimed at the same time. let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); @@ -963,7 +959,7 @@ mod tests { let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); assert_eq!(persistences.len(), 1); - let (funding_txo, updates) = persistences.iter().next().unwrap(); + let (_, updates) = persistences.iter().next().unwrap(); assert_eq!(updates.len(), 2); // Note that updates is a HashMap so the ordering here is actually random. This shouldn't @@ -972,23 +968,23 @@ mod tests { let next_update = update_iter.next().unwrap().clone(); // Should contain next_update when pending updates listed. 
#[cfg(not(c_bindings))] - assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo) + assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(&channel_id) .unwrap().contains(&next_update)); #[cfg(c_bindings)] assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter() - .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update)); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap(); + .find(|(chan_id, _)| chan_id == channel_id).unwrap().1.contains(&next_update)); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(channel_id, next_update.clone()).unwrap(); // Should not contain the previously pending next_update when pending updates listed. #[cfg(not(c_bindings))] - assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo) + assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(&channel_id) .unwrap().contains(&next_update)); #[cfg(c_bindings)] assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter() - .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update)); + .find(|(chan_id, _)| txo == channel_id).unwrap().1.contains(&next_update)); assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(channel_id, update_iter.next().unwrap().clone()).unwrap(); let claim_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(claim_events.len(), 2); diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 36b1ce57309..f119cb9ca0f 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -276,7 +276,7 @@ pub trait Watch { /// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch /// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected /// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected - fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result; + fn watch_channel(&self, channel_id: ChannelId, monitor: ChannelMonitor) -> Result; /// Updates a channel identified by `funding_txo` by applying `update` to its monitor. /// @@ -293,7 +293,7 @@ pub trait Watch { /// [`ChannelMonitorUpdateStatus::UnrecoverableError`], see its documentation for more info. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager - fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus; + fn update_channel(&self, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus; /// Returns any monitor events since the last call. Subsequent calls must only return new /// events. 
diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index bfac345a4c6..ff33981687b 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -489,8 +489,8 @@ fn do_test_async_raa_peer_disconnect(test_case: UnblockSignerAcrossDisconnectCas if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - dst.chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); check_added_monitors!(dst, 0); } @@ -614,8 +614,8 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: UnblockSignerAc if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - dst.chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); check_added_monitors!(dst, 0); } @@ -737,8 +737,8 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); check_added_monitors!(nodes[0], 0); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 89a69321bc6..22061e0c6aa 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -16,7 +16,6 @@ use bitcoin::constants::genesis_block; use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; -use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields}; @@ -50,7 +49,6 @@ fn test_monitor_and_persister_update_fail() { // Create some initial channel let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let outpoint = OutPoint { txid: chan.3.compute_txid(), index: 0 }; // Rebalance 
the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -74,14 +72,14 @@ fn test_monitor_and_persister_update_fail() { }; let chain_mon = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); let new_monitor = <(BlockHash, ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(chain_mon.watch_channel(chan.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); chain_mon }; chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); @@ -102,12 +100,12 @@ fn test_monitor_and_persister_update_fail() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. - if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor paused"); } + if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(chan.2, &update) {} else { panic!("Expected monitor paused"); } logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1); // Apply the monitor update to the original ChainMonitor, ensuring the // ChannelManager and ChannelMonitor aren't out of sync. - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), + assert_eq!(nodes[0].chain_monitor.update_channel(chan.2, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { @@ -152,8 +150,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -313,8 +311,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... 
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); macro_rules! disconnect_reconnect_peers { () => { { @@ -622,8 +620,8 @@ fn test_monitor_update_fail_cs() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -655,8 +653,8 @@ fn test_monitor_update_fail_cs() { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -717,8 +715,8 @@ fn test_monitor_update_fail_no_rebroadcast() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -779,8 +777,8 @@ fn test_monitor_update_raa_while_paused() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, 
latest_update); check_added_monitors!(nodes[0], 0); let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -905,8 +903,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); @@ -1160,8 +1158,8 @@ fn test_monitor_update_fail_reestablish() { .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1238,8 +1236,8 @@ fn raa_no_response_awaiting_raa_state() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1360,8 +1358,8 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1475,8 +1473,8 @@ fn monitor_failed_no_reestablish_response() { let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1568,8 +1566,8 @@ fn first_message_on_recv_ordering() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1659,8 +1657,8 @@ fn test_monitor_update_fail_claim() { // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
let channel_id = chan_1.2; - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); @@ -1759,8 +1757,8 @@ fn test_monitor_update_on_pending_forwards() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1826,8 +1824,8 @@ fn monitor_update_claim_fail_no_response() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1866,7 +1864,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); + let channel_id = ChannelId::v1_from_funding_txid(funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index); nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); @@ -1876,8 +1874,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = 
nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); @@ -1922,8 +1920,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { @@ -2024,8 +2022,8 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); @@ -2401,8 +2399,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). 
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id); + let (mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); // New outbound messages should be generated immediately upon a call to @@ -2620,15 +2618,15 @@ fn test_temporary_error_during_shutdown() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); @@ -2663,7 +2661,7 @@ fn double_temp_error() { // `claim_funds` results in a ChannelMonitorUpdate. 
nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); - let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, @@ -2672,11 +2670,11 @@ fn double_temp_error() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1); + let (latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_2); // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions // and get both PaymentClaimed events at once. @@ -3119,8 +3117,8 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating // process. - let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C @@ -3147,8 +3145,8 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // ChannelMonitorUpdate hasn't yet completed. 
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the // preimage (as it was a replay of the original ChannelMonitorUpdate from before we @@ -3314,8 +3312,8 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, // Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending // `PaymentForwarded` event will finally be released. - let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id); + let (ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id); // If the A<->B channel was closed before we reload, we'll replay the claim against it on // reload, causing the `PaymentForwarded` event to get replayed. @@ -3391,8 +3389,8 @@ fn test_sync_async_persist_doesnt_hang() { // Immediately complete the monitor update, but before the ChannelManager has a chance to see // the MonitorEvent::Completed, create a channel update by receiving a claim on the second // payment. - let (outpoint, _, ab_update_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + let (_, ab_update_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); nodes[1].node.claim_funds(payment_preimage_2); check_added_monitors(&nodes[1], 1); @@ -3504,7 +3502,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); } - let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2; + let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if close_during_reload { 2 } else { 1 }); expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), @@ -3520,7 +3518,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Once we run event processing the monitor should free, check that it was indeed the B<->C // channel which was updated. 
check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 }); - let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2; + let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; assert!(bc_update_id != post_ev_bc_update_id); // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b6f1c032f42..232e763a8a2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1700,7 +1700,7 @@ where /// /// // Move the monitors to the ChannelManager's chain::Watch parameter /// for monitor in channel_monitors { -/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); +/// chain_monitor.watch_channel(monitor.channel_id(), monitor); /// } /// # Ok(()) /// # } @@ -3295,7 +3295,7 @@ macro_rules! handle_new_monitor_update { $in_flight_updates.len() - 1 }); if $self.background_events_processed_since_startup.load(Ordering::Acquire) { - let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]); + let update_res = $self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]); handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed) } else { // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we @@ -6307,10 +6307,10 @@ where for event in background_events.drain(..) { match event { - BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => { + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((_funding_txo, channel_id, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. - let _ = self.chain_monitor.update_channel(funding_txo, &update); + let _ = self.chain_monitor.update_channel(channel_id, &update); }, BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update); @@ -8116,7 +8116,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible"); }, hash_map::Entry::Vacant(i_e) => { - let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); + let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { i_e.insert(chan.context.get_counterparty_node_id()); mem::drop(outpoint_to_peer_lock); @@ -8175,7 +8175,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger); match res { Ok((mut chan, monitor)) => { - if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) { + if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.channel_id(), monitor) { // We really should be able to insert here without doing a second // lookup, but sadly rust stdlib doesn't currently allow keeping // the original Entry around with the value removed. @@ -8860,7 +8860,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let monitor = try_chan_phase_entry!( self, peer_state, chan.commitment_signed_initial_v2(msg, best_block, &self.signer_provider, &&logger), chan_phase_entry); - let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); + let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e95b3334850..720257b415c 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -255,8 +255,8 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) { // Ensure `get_claimable_balances`' self-tests never panic - for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() { - node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(); + for channel_id in node.chain_monitor.chain_monitor.list_monitors() { + node.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().get_claimable_balances(); } } @@ -554,9 +554,7 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> { channel_keys_id = Some(chan.channel_keys_id); } - let monitor = self.chain_monitor.chain_monitor.list_monitors().into_iter() - .find(|(_, channel_id)| *channel_id == *chan_id) - .and_then(|(funding_txo, _)| self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok()); + let monitor = self.chain_monitor.chain_monitor.get_monitor(*chan_id).ok(); if let Some(monitor) = monitor { monitor.do_mut_signer_call(|signer| { channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id())); @@ -690,9 +688,9 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let feeest = test_utils::TestFeeEstimator::new(253); let mut deserialized_monitors = Vec::new(); { - for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() { + for channel_id in self.chain_monitor.chain_monitor.list_monitors() { let mut w = test_utils::TestVecWriter(Vec::new()); - self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap(); + self.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut w).unwrap(); let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap(); deserialized_monitors.push(deserialized_monitor); @@ -734,8 +732,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager); for deserialized_monitor in deserialized_monitors.drain(..) { - let funding_outpoint = deserialized_monitor.get_funding_txo().0; - if chain_monitor.watch_channel(funding_outpoint, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) { + let channel_id = deserialized_monitor.channel_id(); + if chain_monitor.watch_channel(channel_id, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) { panic!(); } } @@ -1033,20 +1031,7 @@ macro_rules! get_channel_type_features { macro_rules! 
get_monitor { ($node: expr, $channel_id: expr) => { { - use bitcoin::hashes::Hash; - let mut monitor = None; - // Assume funding vout is either 0 or 1 blindly - for index in 0..2 { - if let Ok(mon) = $node.chain_monitor.chain_monitor.get_monitor( - $crate::chain::transaction::OutPoint { - txid: bitcoin::Txid::from_slice(&$channel_id.0[..]).unwrap(), index - }) - { - monitor = Some(mon); - break; - } - } - monitor.unwrap() + $node.chain_monitor.chain_monitor.get_monitor($channel_id).unwrap() } } } @@ -1170,8 +1155,8 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User assert!(node_read.is_empty()); for monitor in monitors_read.drain(..) { - let funding_outpoint = monitor.get_funding_txo().0; - assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, monitor), + let channel_id = monitor.channel_id(); + assert_eq!(node.chain_monitor.watch_channel(channel_id, monitor), Ok(ChannelMonitorUpdateStatus::Completed)); check_added_monitors!(node, 1); } @@ -1275,7 +1260,7 @@ pub fn create_dual_funding_utxos_with_prev_txs( } pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: ChannelId) -> Transaction { - let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42); + let (temporary_channel_id, tx, _) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42); assert_eq!(temporary_channel_id, expected_temporary_channel_id); assert!(node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).is_ok()); @@ -1283,11 +1268,16 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: & let funding_created_msg = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id); + + let channel_id = ChannelId::v1_from_funding_txid( + funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index + ); + node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &funding_created_msg); { let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id()); @@ -1296,7 +1286,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: & { let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id()); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index f1bc11d421c..95d3bcd1421 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -566,19 +566,23 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { if steps & 0x0f == 2 { return; } nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temporary_channel_id, tx, _) = 
create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); if steps & 0x0f == 3 { return; } nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created.funding_txid.as_byte_array(), funding_created.funding_output_index + ); + if steps & 0x0f == 4 { return; } nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -590,7 +594,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } @@ -2392,7 +2396,7 @@ fn channel_monitor_network_test() { // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and // confusing us in the following tests. - let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.compute_txid(), index: 0 }); + let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&chan_3.2); // One pending HTLC to time out: let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000); @@ -2462,7 +2466,7 @@ fn channel_monitor_network_test() { assert_eq!(nodes[3].node.list_channels().len(), 0); assert_eq!(nodes[4].node.list_channels().len(), 0); - assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.compute_txid(), index: 0 }, chan_3_mon), + assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(chan_3.2, chan_3_mon), Ok(ChannelMonitorUpdateStatus::Completed)); check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [node_id_4], 100000); } @@ -3096,7 +3100,7 @@ fn test_htlc_on_chain_success() { { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0.txid, chan_2.3.compute_txid()); + assert_eq!(added_monitors[0].0, chan_2.2); added_monitors.clear(); } let forwarded_events = nodes[1].node.get_and_clear_pending_events(); @@ -3134,8 +3138,8 @@ fn test_htlc_on_chain_success() { { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 2); - assert_eq!(added_monitors[0].0.txid, chan_1.3.compute_txid()); - assert_eq!(added_monitors[1].0.txid, chan_1.3.compute_txid()); + assert_eq!(added_monitors[0].0, chan_1.2); + assert_eq!(added_monitors[1].0, chan_1.2); added_monitors.clear(); } assert_eq!(events.len(), 3); @@ -8113,7 +8117,7 @@ fn test_bump_txn_sanitize_tracking_maps() { connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn)); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.compute_txid(), index: 0 }).unwrap(); + let monitor = 
nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty()); assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty()); } @@ -8728,7 +8732,6 @@ fn test_update_err_monitor_lockdown() { // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let outpoint = OutPoint { txid: chan_1.3.compute_txid(), index: 0 }; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -8742,14 +8745,14 @@ fn test_update_err_monitor_lockdown() { let persister = test_utils::TestPersister::new(); let watchtower = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); @@ -8771,8 +8774,8 @@ fn test_update_err_monitor_lockdown() { let mut node_0_peer_state_lock; if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { - assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(watchtower.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); + assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { assert!(false); @@ -8798,7 +8801,6 @@ fn test_concurrent_monitor_claim() { // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let outpoint = OutPoint { txid: chan_1.3.compute_txid(), index: 0 }; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -8815,14 +8817,14 @@ fn test_concurrent_monitor_claim() { ); let watchtower_alice = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(outpoint, new_monitor), 
Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); @@ -8847,14 +8849,14 @@ fn test_concurrent_monitor_claim() { let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); let watchtower_bob = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); watchtower }; watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1); @@ -8874,9 +8876,9 @@ fn test_concurrent_monitor_claim() { if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update - assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(watchtower_alice.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); + assert_eq!(watchtower_bob.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { assert!(false); @@ -9309,14 +9311,15 @@ fn test_duplicate_conflicting_funding_from_second_peer() { let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); - let (_, tx, funding_output) = + let (_, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let real_chan_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails. 
let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3; let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone(); - nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap(); + nodes[0].chain_monitor.chain_monitor.watch_channel(real_chan_id, dummy_monitor).unwrap(); nodes[0].node.funding_transaction_generated(temp_chan_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); @@ -9423,17 +9426,21 @@ fn test_duplicate_chan_id() { } // Move the first channel through the funding flow... - let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index + ); + nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -9520,7 +9527,7 @@ fn test_duplicate_chan_id() { { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 9556e988b4e..19bc12d54f9 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -336,7 +336,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert_eq!(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 1_000, transaction_fee_satoshis: 0, outbound_payment_htlc_rounded_msat: 0, @@ -344,7 +344,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); @@ -379,13 +379,13 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CoopClose, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + 
nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert_eq!(vec![Balance::ClaimableAwaitingConfirmations { amount_satoshis: 1000, confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CoopClose, }], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); @@ -399,9 +399,9 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { connect_blocks(&nodes[1], 1); assert_eq!(Vec::::new(), - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert_eq!(Vec::::new(), - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); let spendable_outputs_a = test_spendable_output(&nodes[0], &shutdown_tx[0], false); assert_eq!( @@ -549,7 +549,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }, sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 1_000, transaction_fee_satoshis: 0, @@ -558,7 +558,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 3300, }, received_htlc_balance.clone(), received_htlc_timeout_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); @@ -615,7 +615,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { a_expected_balances.push(sent_htlc_balance.clone()); } assert_eq!(sorted_vec(a_expected_balances), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 1_000 + 3_000 + 4_000, transaction_fee_satoshis: 0, @@ -625,7 +625,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { 200 /* 1 HTLC */ } else { 300 /* 2 HTLCs */ }, inbound_htlc_rounded_msat: 0, }], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // Broadcast the closing transaction (which has both pending HTLCs in it) and get B's // broadcasted HTLC claim transaction with preimage. 
@@ -698,7 +698,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CounterpartyForceClosed, }, sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // The main non-HTLC balance is just awaiting confirmations, but the claimable height is the // CSV delay, not ANTI_REORG_DELAY. assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -709,7 +709,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // Both HTLC balances are "contentious" as our counterparty could claim them if we wait too // long. received_htlc_claiming_balance.clone(), received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], dust_payment_hash, false); @@ -718,13 +718,13 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // After ANTI_REORG_DELAY, A will consider its balance fully spendable and generate a // `SpendableOutputs` event. However, B still has to wait for the CSV delay. assert_eq!(sorted_vec(vec![sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { amount_satoshis: 1_000, confirmation_height: node_b_commitment_claimable, source: BalanceSource::HolderForceClosed, }, received_htlc_claiming_balance.clone(), received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); test_spendable_output(&nodes[0], &remote_txn[0], false); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); @@ -738,10 +738,10 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { expect_payment_sent(&nodes[0], payment_preimage, None, true, false); } assert_eq!(sorted_vec(vec![sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert_eq!(vec![sent_htlc_timeout_balance.clone()], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // When the HTLC timeout output is spendable in the next block, A should broadcast it connect_blocks(&nodes[0], htlc_cltv_timeout - nodes[0].best_block_info().1); @@ -769,12 +769,12 @@ fn 
do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::Htlc, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // After ANTI_REORG_DELAY, A will generate a SpendableOutputs event and drop the claimable // balance entry. connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert_eq!(Vec::::new(), - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); expect_payment_failed!(nodes[0], timeout_payment_hash, false); test_spendable_output(&nodes[0], &a_htlc_timeout_tx, false); @@ -794,7 +794,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: node_b_htlc_claimable, source: BalanceSource::Htlc, }, received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // After reaching the commitment output CSV, we'll get a SpendableOutputs event for it and have // only the HTLCs claimable on node B. @@ -806,7 +806,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: node_b_htlc_claimable, source: BalanceSource::Htlc, }, received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // After reaching the claimed HTLC output CSV, we'll get a SpendableOutptus event for it and // have only one HTLC output left spendable. @@ -814,17 +814,17 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { test_spendable_output(&nodes[1], &b_broadcast_txn[0], anchors); assert_eq!(vec![received_htlc_timeout_claiming_balance.clone()], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // Finally, mine the HTLC timeout transaction that A broadcasted (even though B should be able // to claim this HTLC with the preimage it knows!). It will remain listed as a claimable HTLC // until ANTI_REORG_DELAY confirmations on the spend. 
mine_transaction(&nodes[1], &a_htlc_timeout_tx); assert_eq!(vec![received_htlc_timeout_claiming_balance.clone()], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); assert_eq!(Vec::::new(), - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -833,7 +833,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { connect_blocks(node, 6); connect_blocks(node, 6); assert!(node.chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(node.chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } } @@ -888,7 +888,6 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { // Create a single channel with two pending HTLCs from nodes[0] to nodes[1], one which nodes[1] // knows the preimage for, one which it does not. let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); - let funding_outpoint = OutPoint { txid: funding_tx.compute_txid(), index: 0 }; let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000_000); let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety @@ -965,7 +964,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_commitment_claimable, source: BalanceSource::HolderForceClosed, }, htlc_balance_known_preimage.clone(), htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Get nodes[1]'s HTLC claim tx for the second HTLC mine_transaction(&nodes[1], &commitment_tx); @@ -984,7 +983,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_commitment_claimable, source: BalanceSource::HolderForceClosed, }, htlc_balance_known_preimage.clone(), htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); if anchors { handle_bump_htlc_event(&nodes[0], 1); } @@ -1014,7 +1013,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_commitment_claimable, source: BalanceSource::HolderForceClosed, }, htlc_balance_known_preimage.clone(), htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); if anchors { // The HTLC timeout claim corresponding 
to the counterparty preimage claim is removed from the @@ -1044,7 +1043,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_htlc_claimable, source: BalanceSource::Htlc, }, htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Finally make the HTLC transactions have ANTI_REORG_DELAY blocks. This call previously // panicked as described in the test introduction. This will remove the "maybe claimable" @@ -1061,7 +1060,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_htlc_claimable, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Connect blocks until the commitment transaction's CSV expires, providing us the relevant // `SpendableOutputs` event and removing the claimable balance entry. @@ -1074,7 +1073,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_htlc_claimable, source: BalanceSource::Htlc, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); let to_self_spendable_output = test_spendable_output(&nodes[0], &commitment_tx, false); assert_eq!( get_monitor!(nodes[0], chan_id).get_spendable_outputs(&commitment_tx, commitment_tx_conf_height_a), @@ -1084,7 +1083,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { // Connect blocks until the HTLC-Timeout's CSV expires, providing us the relevant // `SpendableOutputs` event and removing the claimable balance entry. connect_blocks(&nodes[0], node_a_htlc_claimable - nodes[0].best_block_info().1); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); test_spendable_output(&nodes[0], &timeout_htlc_txn[0], false); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're @@ -1093,7 +1092,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { connect_blocks(&nodes[0], 6); connect_blocks(&nodes[0], 6); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -1112,7 +1111,6 @@ fn test_no_preimage_inbound_htlc_balances() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); - let funding_outpoint = OutPoint { txid: funding_tx.compute_txid(), index: 0 }; // Send two HTLCs, one from A to B, and one from B to A. 
let to_b_failed_payment_hash = route_payment(&nodes[0], &[&nodes[1]], 10_000_000).1; @@ -1158,7 +1156,7 @@ fn test_no_preimage_inbound_htlc_balances() { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }, a_received_htlc_balance.clone(), a_sent_htlc_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 500_000 - 20_000, @@ -1168,7 +1166,7 @@ fn test_no_preimage_inbound_htlc_balances() { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }, b_received_htlc_balance.clone(), b_sent_htlc_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Get nodes[0]'s commitment transaction and HTLC-Timeout transaction let as_txn = get_local_commitment_txn!(nodes[0], chan_id); @@ -1193,7 +1191,7 @@ fn test_no_preimage_inbound_htlc_balances() { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); assert_eq!(as_pre_spend_claims, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_txn[0]); check_added_monitors!(nodes[1], 1); @@ -1207,7 +1205,7 @@ fn test_no_preimage_inbound_htlc_balances() { source: BalanceSource::CounterpartyForceClosed, }, b_received_htlc_balance.clone(), b_sent_htlc_balance.clone()]); assert_eq!(bs_pre_spend_claims, - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // We'll broadcast the HTLC-Timeout transaction one block prior to the htlc's expiration (as it // is confirmable in the next block), but will still include the same claimable balances as no @@ -1222,11 +1220,11 @@ fn test_no_preimage_inbound_htlc_balances() { [HTLCDestination::FailedPayment { payment_hash: to_a_failed_payment_hash }]); assert_eq!(as_pre_spend_claims, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], 1); assert_eq!(as_pre_spend_claims, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // For node B, we'll get the non-HTLC funds claimable after ANTI_REORG_DELAY confirmations connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -1243,11 +1241,11 @@ fn test_no_preimage_inbound_htlc_balances() { check_spends!(bs_htlc_timeout_claim[0], as_txn[0]); assert_eq!(bs_pre_spend_claims, - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], 
1); assert_eq!(bs_pre_spend_claims, - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Now confirm the two HTLC timeout transactions for A, checking that the inbound HTLC resolves // after ANTI_REORG_DELAY confirmations and the other takes BREAKDOWN_TIMEOUT confirmations. @@ -1263,7 +1261,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: as_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &bs_htlc_timeout_claim[0]); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -1276,7 +1274,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: as_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Once as_htlc_timeout_claim[0] reaches ANTI_REORG_DELAY confirmations, we should get a // payment failure event. @@ -1294,7 +1292,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout), source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], node_a_commitment_claimable - nodes[0].best_block_info().1); assert_eq!(vec![Balance::ClaimableAwaitingConfirmations { @@ -1302,11 +1300,11 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout), source: BalanceSource::Htlc, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); test_spendable_output(&nodes[0], &as_txn[0], false); connect_blocks(&nodes[0], as_timeout_claimable_height - nodes[0].best_block_info().1); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); test_spendable_output(&nodes[0], &as_htlc_timeout_claim[0], false); // The process for B should be completely identical as well, noting that the non-HTLC-balance @@ -1318,7 +1316,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: bs_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_htlc_timeout_claim[0]); assert_eq!(sorted_vec(vec![b_received_htlc_balance.clone(), Balance::ClaimableAwaitingConfirmations { @@ -1326,17 +1324,17 @@ fn test_no_preimage_inbound_htlc_balances() { 
confirmation_height: bs_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); expect_payment_failed!(nodes[1], to_a_failed_payment_hash, false); assert_eq!(vec![b_received_htlc_balance.clone()], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); test_spendable_output(&nodes[1], &bs_htlc_timeout_claim[0], false); connect_blocks(&nodes[1], 1); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -1344,7 +1342,7 @@ fn test_no_preimage_inbound_htlc_balances() { connect_blocks(&nodes[1], 6); connect_blocks(&nodes[1], 6); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_spend_first: bool) { @@ -1471,7 +1469,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ outbound_payment: true, }, ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); mine_transaction(&nodes[1], &as_revoked_txn[0]); @@ -1526,7 +1524,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ htlc_unclaimed_balance(4_000), htlc_unclaimed_balance(5_000), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); if confirm_htlc_spend_first { @@ -1558,7 +1556,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ pinnable_claimed_balance.clone(), to_self_unclaimed_balance.clone(), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); } else { assert_eq!( @@ -1569,7 +1567,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ htlc_unclaimed_balance(4_000), htlc_unclaimed_balance(5_000), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); } @@ -1584,7 +1582,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ pinnable_claimed_balance.clone(), 
unpinnable_claimed_balance.clone(), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); connect_blocks(&nodes[1], 3); @@ -1618,7 +1616,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ expect_payment_failed_conditions_event(payment_failed_events[2..].to_vec(), timeout_payment_hash, false, PaymentFailedConditions::new()); } - assert_eq!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new()); + assert_eq!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances(), Vec::new()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -1626,7 +1624,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ connect_blocks(&nodes[1], 6); connect_blocks(&nodes[1], 6); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -1776,7 +1774,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { amount_satoshis: 1_000, }]); assert_eq!(as_balances, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &revoked_htlc_success); let as_htlc_claim_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -1789,7 +1787,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { check_spends!(as_htlc_claim_tx[1], revoked_local_txn[0]); assert_eq!(as_balances, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(as_htlc_claim_tx[0].output.len(), 1); let as_revoked_htlc_success_claim_fee = chan_feerate * as_htlc_claim_tx[0].weight().to_wu() / 1000; @@ -1819,7 +1817,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 3); test_spendable_output(&nodes[0], &revoked_local_txn[0], false); @@ -1833,7 +1831,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + 2, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], 2); 
test_spendable_output(&nodes[0], &as_htlc_claim_tx[0], false); @@ -1843,7 +1841,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2 amount_satoshis: 1_000, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], revoked_htlc_timeout.lock_time.to_consensus_u32() - nodes[0].best_block_info().1); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], @@ -1881,7 +1879,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2 amount_satoshis: 1_000, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &revoked_htlc_timeout_claim); assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable { @@ -1892,7 +1890,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &revoked_to_self_claim); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -1905,14 +1903,14 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 2, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); test_spendable_output(&nodes[0], &revoked_htlc_timeout_claim, false); connect_blocks(&nodes[0], 1); test_spendable_output(&nodes[0], &revoked_to_self_claim, false); - assert_eq!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new()); + assert_eq!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances(), Vec::new()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -1920,7 +1918,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { connect_blocks(&nodes[0], 6); connect_blocks(&nodes[0], 6); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -2025,7 +2023,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { payment_hash: claimed_payment_hash, outbound_payment: true, }]), - 
sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_revoked_txn[0]); check_closed_broadcast!(nodes[1], true); @@ -2063,7 +2061,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2 amount_satoshis: 3_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Confirm A's HTLC-Success transaction which presumably raced B's claim, causing B to create a // new claim. @@ -2119,7 +2117,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { // anyway, so its not a big change. amount_satoshis: 3_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], 5); test_spendable_output(&nodes[1], &as_revoked_txn[0], false); @@ -2135,7 +2133,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { // anyway, so its not a big change. amount_satoshis: 3_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &claim_txn_2[0]); let htlc_2_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2150,7 +2148,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { confirmation_height: htlc_2_claim_maturity, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], 5); test_spendable_output(&nodes[1], &claim_txn_2[0], false); @@ -2161,7 +2159,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1 amount_satoshis: 4_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transactions(&nodes[1], &[&claim_txn[0], &claim_txn_2[1]]); let rest_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2179,7 +2177,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { source: BalanceSource::CounterpartyForceClosed, }, ], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // We shouldn't fail the payment until we spend the output @@ -2198,7 +2196,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { panic!("unexpected event"); } } - 
assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -2206,7 +2204,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { connect_blocks(&nodes[1], 6); connect_blocks(&nodes[1], 6); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -2287,16 +2285,16 @@ fn do_test_claimable_balance_correct_while_payment_pending(outbound_payment: boo if outbound_payment { assert_eq!( 1_000_000 - commitment_tx_fee - anchor_outputs_value - 4_001 /* Note HTLC timeout amount of 4001 sats is excluded for outbound payment */, - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint_ab).unwrap().get_claimable_balances().iter().map( + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_ab_id).unwrap().get_claimable_balances().iter().map( |x| x.claimable_amount_satoshis()).sum()); } else { assert_eq!( 0u64, - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint_ab).unwrap().get_claimable_balances().iter().map( + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_ab_id).unwrap().get_claimable_balances().iter().map( |x| x.claimable_amount_satoshis()).sum()); assert_eq!( 1_000_000 - commitment_tx_fee - anchor_outputs_value /* Note HTLC timeout amount of 4000 sats is included */, - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint_bc).unwrap().get_claimable_balances().iter().map( + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_bc_id).unwrap().get_claimable_balances().iter().map( |x| x.claimable_amount_satoshis()).sum()); } } @@ -2411,7 +2409,7 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; - let commitment_txn = get_local_commitment_txn!(&nodes[0], &chan_id); + let commitment_txn = get_local_commitment_txn!(&nodes[0], chan_id); assert_eq!(commitment_txn.len(), if anchors { 1 /* commitment tx only */} else { 2 /* commitment and htlc timeout tx */ }); check_spends!(&commitment_txn[0], &funding_tx); mine_transaction(&nodes[0], &commitment_txn[0]); @@ -2769,7 +2767,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() { bob_persister, bob_chain_monitor, bob_deserialized ); for chan_id in [chan_a.2, chan_b.2].iter() { - let monitor = get_monitor!(nodes[1], chan_id); + let monitor = get_monitor!(nodes[1], *chan_id); for payment in [payment_a, payment_b, payment_c, payment_d].iter() { monitor.provide_payment_preimage_unsafe_legacy( &payment.1, &payment.0, &node_cfgs[1].tx_broadcaster, diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 55e97e06894..92ebe2d8cbc 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -697,8 +697,8 @@ fn test_0conf_channel_with_async_monitor() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); 
- let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap(); + let (_, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(bs_raa.channel_id, latest_update).unwrap(); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 28465a09660..054d4fdf5af 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -25,6 +25,7 @@ use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::config::UserConfig; +use bitcoin::hashes::Hash; use bitcoin::hash_types::BlockHash; use crate::prelude::*; @@ -256,11 +257,16 @@ fn test_manager_serialize_deserialize_events() { node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).unwrap(); check_added_monitors!(node_a, 0); - node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())); + let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created.funding_txid.as_byte_array(), funding_created.funding_output_index + ); + + node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &funding_created); { let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } @@ -269,7 +275,7 @@ fn test_manager_serialize_deserialize_events() { { let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead @@ -368,7 +374,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_stale_monitors_serialized = Vec::new(); for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] { let mut writer = test_utils::TestVecWriter(Vec::new()); - get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap(); + get_monitor!(nodes[0], *chan_id_iter).write(&mut writer).unwrap(); node_0_stale_monitors_serialized.push(writer.0); } @@ -386,7 +392,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { // nodes[3]) let mut node_0_monitors_serialized = Vec::new(); for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] { - node_0_monitors_serialized.push(get_monitor!(nodes[0], chan_id_iter).encode()); + node_0_monitors_serialized.push(get_monitor!(nodes[0], *chan_id_iter).encode()); } logger = test_utils::TestLogger::new(); @@ -450,8 +456,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { assert!(nodes_0_read.is_empty()); for monitor in node_0_monitors.drain(..) 
{ - let funding_outpoint = monitor.get_funding_txo().0; - assert_eq!(nodes[0].chain_monitor.watch_channel(funding_outpoint, monitor), + assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.channel_id(), monitor), Ok(ChannelMonitorUpdateStatus::Completed)); check_added_monitors!(nodes[0], 1); } @@ -839,10 +844,10 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // monitors and ChannelManager, for use later, if we don't want to persist both monitors. let mut original_monitor = test_utils::TestVecWriter(Vec::new()); if !persist_both_monitors { - for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() { + for channel_id in nodes[3].chain_monitor.chain_monitor.list_monitors() { if channel_id == chan_id_not_persisted { assert!(original_monitor.0.is_empty()); - nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap(); + nodes[3].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut original_monitor).unwrap(); } } } @@ -859,18 +864,18 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // crashed in between the two persistence calls - using one old ChannelMonitor and one new one, // with the old ChannelManager. let mut updated_monitor = test_utils::TestVecWriter(Vec::new()); - for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() { + for channel_id in nodes[3].chain_monitor.chain_monitor.list_monitors() { if channel_id == chan_id_persisted { assert!(updated_monitor.0.is_empty()); - nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap(); + nodes[3].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut updated_monitor).unwrap(); } } // If `persist_both_monitors` is set, get the second monitor here as well if persist_both_monitors { - for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() { + for channel_id in nodes[3].chain_monitor.chain_monitor.list_monitors() { if channel_id == chan_id_not_persisted { assert!(original_monitor.0.is_empty()); - nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap(); + nodes[3].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut original_monitor).unwrap(); } } } diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 9fd428329af..78752b901a8 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -1362,8 +1362,8 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { expect_channel_shutdown_state!(nodes[0], chan_id, ChannelShutdownState::ResolvingHTLCs); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); let as_raa_closing_signed = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_raa_closing_signed.len(), 2); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 48bdfe2324a..7ebbf1c5734 100644 --- a/lightning/src/util/test_utils.rs +++ 
b/lightning/src/util/test_utils.rs @@ -387,9 +387,9 @@ impl SignerProvider for OnlyReadsKeysInterface { } pub struct TestChainMonitor<'a> { - pub added_monitors: Mutex)>>, + pub added_monitors: Mutex)>>, pub monitor_updates: Mutex>>, - pub latest_monitor_update_id: Mutex>, + pub latest_monitor_update_id: Mutex>, pub chain_monitor: ChainMonitor< TestChannelSigner, &'a TestChainSource, @@ -432,14 +432,14 @@ impl<'a> TestChainMonitor<'a> { } pub fn complete_sole_pending_chan_update(&self, channel_id: &ChannelId) { - let (outpoint, _, latest_update) = + let (_, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone(); - self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap(); + self.chain_monitor.channel_monitor_updated(*channel_id, latest_update).unwrap(); } } impl<'a> chain::Watch for TestChainMonitor<'a> { fn watch_channel( - &self, funding_txo: OutPoint, monitor: ChannelMonitor, + &self, channel_id: ChannelId, monitor: ChannelMonitor, ) -> Result { // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... @@ -452,23 +452,21 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { .unwrap() .1; assert!(new_monitor == monitor); - self.latest_monitor_update_id.lock().unwrap().insert( - monitor.channel_id(), - (funding_txo, monitor.get_latest_update_id(), monitor.get_latest_update_id()), - ); - self.added_monitors.lock().unwrap().push((funding_txo, monitor)); - self.chain_monitor.watch_channel(funding_txo, new_monitor) + self.latest_monitor_update_id + .lock() + .unwrap() + .insert(channel_id, (monitor.get_latest_update_id(), monitor.get_latest_update_id())); + self.added_monitors.lock().unwrap().push((channel_id, monitor)); + self.chain_monitor.watch_channel(channel_id, new_monitor) } fn update_channel( - &self, funding_txo: OutPoint, update: &ChannelMonitorUpdate, + &self, channel_id: ChannelId, update: &ChannelMonitorUpdate, ) -> chain::ChannelMonitorUpdateStatus { // Every monitor update should survive roundtrip let mut w = TestVecWriter(Vec::new()); update.write(&mut w).unwrap(); assert_eq!(ChannelMonitorUpdate::read(&mut &w.0[..]).unwrap(), *update); - let channel_id = - update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); self.monitor_updates .lock() @@ -491,11 +489,11 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { self.latest_monitor_update_id .lock() .unwrap() - .insert(channel_id, (funding_txo, update.update_id, update.update_id)); - let update_res = self.chain_monitor.update_channel(funding_txo, update); + .insert(channel_id, (update.update_id, update.update_id)); + let update_res = self.chain_monitor.update_channel(channel_id, update); // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... 
- let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap(); + let monitor = self.chain_monitor.get_monitor(channel_id).unwrap(); w.0.clear(); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, ChannelMonitor)>::read( @@ -510,7 +508,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { } else { assert!(new_monitor == *monitor); } - self.added_monitors.lock().unwrap().push((funding_txo, new_monitor)); + self.added_monitors.lock().unwrap().push((channel_id, new_monitor)); update_res } From f4fff426ee842bae5575b673c34a5dbe4225e0d5 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 16 Jan 2025 15:23:16 -0800 Subject: [PATCH 2/3] Use channel ID over funding outpoint to track monitors in ChannelManager As motivated by the previous commit, we do some of the same work here at the `ChannelManager` level instead. Unfortunately, we still need to track the funding outpoint to support downgrades by writing the in flight monitor updates as two separate TLVs, one using the channel IDs, and the other using the funding outpoints. Once we are willing to stop supporting downgrades past this version, we can fully drop it. --- lightning/src/ln/channelmanager.rs | 109 ++++++++++++---------- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/reload_tests.rs | 4 +- 3 files changed, 64 insertions(+), 53 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 232e763a8a2..26d9c96ac0f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1309,7 +1309,8 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// for broadcast messages, where ordering isn't as strict). pub(super) pending_msg_events: Vec, /// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the - /// user but which have not yet completed. + /// user but which have not yet completed. We still keep the funding outpoint around to backfill + /// the legacy TLV field to support downgrading. /// /// Note that the channel may no longer exist. For example if the channel was closed but we /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update @@ -1321,7 +1322,7 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// where we complete one [`ChannelMonitorUpdate`] (but there are more pending as background /// events) but we conclude all pending [`ChannelMonitorUpdate`]s have completed and its safe /// to run post-completion actions. - in_flight_monitor_updates: BTreeMap>, + in_flight_monitor_updates: BTreeMap<(OutPoint, ChannelId), Vec>, /// Map from a specific channel to some action(s) that should be taken when all pending /// [`ChannelMonitorUpdate`]s for the channel complete updating. /// @@ -3284,7 +3285,7 @@ macro_rules! handle_new_monitor_update { $chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer, $completed: expr ) => { { - $in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo) + $in_flight_updates = $peer_state.in_flight_monitor_updates.entry(($funding_txo, $chan_id)) .or_insert_with(Vec::new); // During startup, we push monitor updates as background events through to here in // order to replay updates that were in-flight when we shut down. 
Thus, we have to @@ -4010,7 +4011,7 @@ where let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) { let mut peer_state = peer_state_mtx.lock().unwrap(); - if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) { + if peer_state.in_flight_monitor_updates.get(&(funding_txo, shutdown_res.channel_id)).map(|l| l.is_empty()).unwrap_or(true) { let update_actions = peer_state.monitor_update_blocked_actions .remove(&shutdown_res.channel_id).unwrap_or(Vec::new()); @@ -7574,7 +7575,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; let remaining_in_flight = - if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) { + if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(&(*funding_txo, *channel_id)) { pending.retain(|upd| upd.update_id > highest_applied_update_id); pending.len() } else { 0 }; @@ -12986,12 +12987,22 @@ where pending_claiming_payments = None; } - let mut in_flight_monitor_updates: Option>> = None; + let mut legacy_in_flight_monitor_updates: Option>> = None; + let mut in_flight_monitor_updates: Option>> = None; for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) { - for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() { + for ((funding_txo, channel_id), updates) in peer_state.in_flight_monitor_updates.iter() { if !updates.is_empty() { - if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); } - in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates); + if legacy_in_flight_monitor_updates.is_none() { + legacy_in_flight_monitor_updates = Some(new_hash_map()); + } + legacy_in_flight_monitor_updates.as_mut().unwrap() + .insert((counterparty_id, funding_txo), updates); + + if in_flight_monitor_updates.is_none() { + in_flight_monitor_updates = Some(new_hash_map()); + } + in_flight_monitor_updates.as_mut().unwrap() + .insert((counterparty_id, channel_id), updates); } } } @@ -13006,11 +13017,12 @@ where (7, self.fake_scid_rand_bytes, required), (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option), (9, htlc_purposes, required_vec), - (10, in_flight_monitor_updates, option), + (10, legacy_in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), (14, decode_update_add_htlcs_opt, option), (15, self.inbound_payment_id_secret, required), + (17, in_flight_monitor_updates, required), }); Ok(()) @@ -13146,8 +13158,7 @@ where /// runtime settings which were stored when the ChannelManager was serialized. pub default_config: UserConfig, - /// A map from channel funding outpoints to ChannelMonitors for those channels (ie - /// value.context.get_funding_txo() should be the key). + /// A map from channel IDs to ChannelMonitors for those channels. /// /// If a monitor is inconsistent with the channel state during deserialization the channel will /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This @@ -13158,7 +13169,7 @@ where /// this struct. 
/// /// This is not exported to bindings users because we have no HashMap bindings - pub channel_monitors: HashMap::EcdsaSigner>>, + pub channel_monitors: HashMap::EcdsaSigner>>, } impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> @@ -13187,7 +13198,7 @@ where entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, message_router, logger, default_config, channel_monitors: hash_map_from_iter( - channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }) + channel_monitors.drain(..).map(|monitor| { (monitor.channel_id(), monitor) }) ), } } @@ -13250,22 +13261,21 @@ where let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; - let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); + let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize); for _ in 0..channel_count { let mut channel: FundedChannel = FundedChannel::read(reader, ( &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) ))?; let logger = WithChannelContext::from(&args.logger, &channel.context, None); + let channel_id = channel.context.channel_id(); + channel_id_set.insert(channel_id.clone()); let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; - funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id()); - funding_txo_set.insert(funding_txo.clone()); - if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { + if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) { if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || @@ -13348,9 +13358,7 @@ where if let Some(short_channel_id) = channel.context.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); } - if let Some(funding_txo) = channel.context.get_funding_txo() { - outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id()); - } + outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id()); per_peer_state.entry(channel.context.get_counterparty_node_id()) .or_insert_with(|| Mutex::new(empty_peer_state())) .get_mut().unwrap() @@ -13380,8 +13388,8 @@ where } } - for (funding_txo, monitor) in args.channel_monitors.iter() { - if !funding_txo_set.contains(funding_txo) { + for (channel_id, monitor) in args.channel_monitors.iter() { + if !channel_id_set.contains(channel_id) { let mut should_queue_fc_update = false; if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() { // If the ChannelMonitor had any updates, we may need to 
update it further and @@ -13419,10 +13427,11 @@ where updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], channel_id: Some(monitor.channel_id()), }; + let funding_txo = monitor.get_funding_txo().0; if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() { let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, - funding_txo: *funding_txo, + funding_txo, channel_id, update: monitor_update, }; @@ -13435,7 +13444,7 @@ where // generate a `ChannelMonitorUpdate` for it aside from this // `ChannelForceClosed` one. monitor_update.update_id = u64::MAX; - close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update))); + close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update))); } } } @@ -13535,7 +13544,10 @@ where let mut pending_claiming_payments = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); let mut events_override = None; - let mut in_flight_monitor_updates: Option>> = None; + let mut _legacy_in_flight_monitor_updates: Option>> = None; + // We use this one over the legacy since they represent the same data, just with a different + // key. We still need to read the legacy one as it's an even TLV. + let mut in_flight_monitor_updates: Option>> = None; let mut decode_update_add_htlcs: Option>> = None; let mut inbound_payment_id_secret = None; read_tlv_fields!(reader, { @@ -13548,11 +13560,12 @@ where (7, fake_scid_rand_bytes, option), (8, events_override, option), (9, claimable_htlc_purposes, optional_vec), - (10, in_flight_monitor_updates, option), + (10, _legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), (14, decode_update_add_htlcs, option), (15, inbound_payment_id_secret, option), + (17, in_flight_monitor_updates, required), }); let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); if fake_scid_rand_bytes.is_none() { @@ -13599,11 +13612,12 @@ where // Because the actual handling of the in-flight updates is the same, it's macro'ized here: let mut pending_background_events = Vec::new(); macro_rules! 
handle_in_flight_updates { - ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr, - $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr + ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr, + $peer_state: expr, $logger: expr, $channel_info_log: expr ) => { { let mut max_in_flight_update_id = 0; $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); + let funding_txo = $monitor.get_funding_txo().0; for update in $chan_in_flight_upds.iter() { log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", update.update_id, $channel_info_log, &$monitor.channel_id()); @@ -13611,7 +13625,7 @@ where pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id: $counterparty_node_id, - funding_txo: $funding_txo, + funding_txo: funding_txo.clone(), channel_id: $monitor.channel_id(), update: update.clone(), }); @@ -13630,7 +13644,7 @@ where .and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v)) .or_insert(max_in_flight_update_id); } - if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() { + if $peer_state.in_flight_monitor_updates.insert((funding_txo, $monitor.channel_id()), $chan_in_flight_upds).is_some() { log_error!($logger, "Duplicate in-flight monitor update set for the same channel!"); return Err(DecodeError::InvalidValue); } @@ -13641,28 +13655,27 @@ where for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for phase in peer_state.channel_by_id.values() { + for (channel_id, phase) in &peer_state.channel_by_id { if let Some(chan) = phase.as_funded() { let logger = WithChannelContext::from(&args.logger, &chan.context, None); // Channels that were persisted have to be funded, otherwise they should have been // discarded. - let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; - let monitor = args.channel_monitors.get(&funding_txo) + let monitor = args.channel_monitors.get(channel_id) .expect("We already checked for monitor presence when loading channels"); let mut max_in_flight_update_id = monitor.get_latest_update_id(); if let Some(in_flight_upds) = &mut in_flight_monitor_updates { - if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) { + if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, *channel_id)) { max_in_flight_update_id = cmp::max(max_in_flight_update_id, handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds, - funding_txo, monitor, peer_state, logger, "")); + monitor, peer_state, logger, "")); } } if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id { // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! 
This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", - chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id); + channel_id, monitor.get_latest_update_id(), max_in_flight_update_id); log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id()); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); @@ -13680,10 +13693,9 @@ where } if let Some(in_flight_upds) = in_flight_monitor_updates { - for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds { - let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied(); - let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None); - if let Some(monitor) = args.channel_monitors.get(&funding_txo) { + for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds { + let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); + if let Some(monitor) = args.channel_monitors.get(&channel_id) { // Now that we've removed all the in-flight monitor updates for channels that are // still open, we need to replay any monitor updates that are for closed channels, // creating the neccessary peer_state entries as we go. @@ -13691,12 +13703,11 @@ where Mutex::new(empty_peer_state()) }); let mut peer_state = peer_state_mutex.lock().unwrap(); - handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, - funding_txo, monitor, peer_state, logger, "closed "); + handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, monitor, + peer_state, logger, "closed "); } else { log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! 
This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) = - channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } ); + log_error!(logger, " The ChannelMonitor for channel {} is missing.", channel_id.to_string()); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -13748,7 +13759,7 @@ where .or_insert(update.update_id); } let in_flight_updates = per_peer_state.in_flight_monitor_updates - .entry(*funding_txo) + .entry((*funding_txo, *channel_id)) .or_insert_with(Vec::new); debug_assert!(!in_flight_updates.iter().any(|upd| upd == update)); in_flight_updates.push(update.clone()); @@ -13873,7 +13884,7 @@ where .filter_map(|(htlc_source, (htlc, preimage_opt))| { if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source { if let Some(payment_preimage) = preimage_opt { - let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint); + let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id); // Note that for channels which have gone to chain, // `get_all_current_outbound_htlcs` is never pruned and always returns // a constant set until the monitor is removed/archived. Thus, we @@ -14361,7 +14372,7 @@ where ); } } - if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) { + if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) { // Note that this is unsafe as we no longer require the // `ChannelMonitor`s to be re-persisted prior to this // `ChannelManager` being persisted after we get started running. 
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 720257b415c..b745348ddb9 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -707,7 +707,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { { let mut channel_monitors = new_hash_map(); for monitor in deserialized_monitors.iter() { - channel_monitors.insert(monitor.get_funding_txo().0, monitor); + channel_monitors.insert(monitor.channel_id(), monitor); } let scorer = RwLock::new(test_utils::TestScorer::new()); @@ -1136,7 +1136,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User let (_, node_deserialized) = { let mut channel_monitors = new_hash_map(); for monitor in monitors_read.iter() { - assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none()); + assert!(channel_monitors.insert(monitor.channel_id(), monitor).is_none()); } <(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs { default_config, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 054d4fdf5af..34eddaa20b5 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -432,7 +432,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, - channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -450,7 +450,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, - channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); From 1c7a0a13f2aa6cb9b930627e297cdc970f2f3f01 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 17 Jan 2025 14:28:30 -0800 Subject: [PATCH 3/3] Stop tracking funding outpoints for in-flight monitor updates We no longer need the funding outpoints as we can fully rely on the channel IDs instead to track them. This acts as a breaking change, as we will no longer write the legacy version of in-flight monitor updates, preventing downgrades to versions that only supported such. Along the way, we clean up `handle_new_monitor_update` and `MonitorUpdateRegeneratedOnStartup` now that their `funding_txo` parameter is unused. --- lightning/src/ln/channelmanager.rs | 133 ++++++++++++----------------- 1 file changed, 54 insertions(+), 79 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 26d9c96ac0f..995e70bc85d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1028,7 +1028,6 @@ enum BackgroundEvent { /// are regenerated on startup. MonitorUpdateRegeneratedOnStartup { counterparty_node_id: PublicKey, - funding_txo: OutPoint, channel_id: ChannelId, update: ChannelMonitorUpdate }, @@ -1174,7 +1173,6 @@ impl_writeable_tlv_based_enum!(EventCompletionAction, /// forwarded payments. 
struct HTLCClaimSource { counterparty_node_id: Option, - funding_txo: OutPoint, channel_id: ChannelId, htlc_id: u64, } @@ -1183,7 +1181,6 @@ impl From<&MPPClaimHTLCSource> for HTLCClaimSource { fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource { HTLCClaimSource { counterparty_node_id: Some(o.counterparty_node_id), - funding_txo: o.funding_txo, channel_id: o.channel_id, htlc_id: o.htlc_id, } @@ -1309,8 +1306,7 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// for broadcast messages, where ordering isn't as strict). pub(super) pending_msg_events: Vec, /// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the - /// user but which have not yet completed. We still keep the funding outpoint around to backfill - /// the legacy TLV field to support downgrading. + /// user but which have not yet completed. /// /// Note that the channel may no longer exist. For example if the channel was closed but we /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update @@ -1322,7 +1318,7 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// where we complete one [`ChannelMonitorUpdate`] (but there are more pending as background /// events) but we conclude all pending [`ChannelMonitorUpdate`]s have completed and its safe /// to run post-completion actions. - in_flight_monitor_updates: BTreeMap<(OutPoint, ChannelId), Vec>, + in_flight_monitor_updates: BTreeMap>, /// Map from a specific channel to some action(s) that should be taken when all pending /// [`ChannelMonitorUpdate`]s for the channel complete updating. /// @@ -2974,9 +2970,9 @@ macro_rules! handle_error { /// [`ChannelMonitor`]/channel funding transaction) to begin with. macro_rules! locked_close_channel { ($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{ - if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() { - handle_new_monitor_update!($self, funding_txo, update, $peer_state, - $channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER); + if let Some((_, _, _, update)) = $shutdown_res_mut.monitor_update.take() { + handle_new_monitor_update!($self, update, $peer_state, $channel_context, + REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER); } // If there's a possibility that we need to generate further monitor updates for this // channel, we need to store the last update_id of it. However, we don't want to insert @@ -3281,11 +3277,11 @@ macro_rules! handle_new_monitor_update { handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan)) }; ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr, - $chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident, + $self: ident, $update: expr, $peer_state: expr, $logger: expr, $chan_id: expr, + $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer, $completed: expr ) => { { - $in_flight_updates = $peer_state.in_flight_monitor_updates.entry(($funding_txo, $chan_id)) + $in_flight_updates = $peer_state.in_flight_monitor_updates.entry($chan_id) .or_insert_with(Vec::new); // During startup, we push monitor updates as background events through to here in // order to replay updates that were in-flight when we shut down. Thus, we have to @@ -3304,7 +3300,6 @@ macro_rules! handle_new_monitor_update { // during the startup sequence should be replayed exactly if we immediately crash. 
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id: $counterparty_node_id, - funding_txo: $funding_txo, channel_id: $chan_id, update: $in_flight_updates[$update_idx].clone(), }; @@ -3321,7 +3316,7 @@ macro_rules! handle_new_monitor_update { } } }; ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr, + $self: ident, $update: expr, $peer_state: expr, $chan_context: expr, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER ) => { { let logger = WithChannelContext::from(&$self.logger, &$chan_context, None); @@ -3329,21 +3324,21 @@ macro_rules! handle_new_monitor_update { let counterparty_node_id = $chan_context.get_counterparty_node_id(); let in_flight_updates; let idx; - handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id, + handle_new_monitor_update!($self, $update, $peer_state, logger, chan_id, counterparty_node_id, in_flight_updates, idx, _internal_outer, { let _ = in_flight_updates.remove(idx); }) } }; ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, + $self: ident, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE ) => { { let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None); let in_flight_updates; let idx; - handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, - $channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer, + handle_new_monitor_update!($self, $update, $peer_state, logger, $channel_id, + $counterparty_node_id, in_flight_updates, idx, _internal_outer, { let _ = in_flight_updates.remove(idx); if in_flight_updates.is_empty() { @@ -3358,7 +3353,7 @@ macro_rules! handle_new_monitor_update { }) } }; ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, + $self: ident, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr ) => { { let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); @@ -3366,7 +3361,7 @@ macro_rules! handle_new_monitor_update { let counterparty_node_id = $chan.context.get_counterparty_node_id(); let in_flight_updates; let idx; - handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id, + handle_new_monitor_update!($self, $update, $peer_state, logger, chan_id, counterparty_node_id, in_flight_updates, idx, _internal_outer, { let _ = in_flight_updates.remove(idx); @@ -3829,7 +3824,6 @@ where match peer_state.channel_by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { - let funding_txo_opt = chan.context.get_funding_txo(); let their_features = &peer_state.latest_features; let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; @@ -3848,8 +3842,8 @@ where // Update the monitor with the shutdown script if necessary. 
if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!(self, monitor_update, peer_state_lock, + peer_state, per_peer_state, chan); } } else { let mut shutdown_res = chan_phase_entry.get_mut().context_mut() @@ -3944,7 +3938,7 @@ where /// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed. fn apply_post_close_monitor_update( - &self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint, + &self, counterparty_node_id: PublicKey, channel_id: ChannelId, monitor_update: ChannelMonitorUpdate, ) { // Note that there may be some post-close updates which need to be well-ordered with @@ -3957,8 +3951,8 @@ where match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(mut chan_phase) => { if let Some(chan) = chan_phase.get_mut().as_funded_mut() { - handle_new_monitor_update!(self, funding_txo, - monitor_update, peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!(self, monitor_update, peer_state_lock, peer_state, + per_peer_state, chan); return; } else { debug_assert!(false, "We shouldn't have an update for a non-funded channel"); @@ -3968,8 +3962,8 @@ where } handle_new_monitor_update!( - self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, - counterparty_node_id, channel_id, POST_CHANNEL_CLOSE + self, monitor_update, peer_state_lock, peer_state, per_peer_state, counterparty_node_id, + channel_id, POST_CHANNEL_CLOSE ); } @@ -3997,9 +3991,9 @@ where let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { + if let Some((_, _, _channel_id, monitor_update)) = shutdown_res.monitor_update { debug_assert!(false, "This should have been handled in `locked_close_channel`"); - self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update); + self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, monitor_update); } if self.background_events_processed_since_startup.load(Ordering::Acquire) { // If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are @@ -4007,11 +4001,11 @@ where // `MonitorUpdateCompletionAction`s. // TODO: If we do the `in_flight_monitor_updates.is_empty()` check in // `locked_close_channel` we can skip the locks here. 
- if let Some(funding_txo) = shutdown_res.channel_funding_txo { + if shutdown_res.channel_funding_txo.is_some() { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) { let mut peer_state = peer_state_mtx.lock().unwrap(); - if peer_state.in_flight_monitor_updates.get(&(funding_txo, shutdown_res.channel_id)).map(|l| l.is_empty()).unwrap_or(true) { + if peer_state.in_flight_monitor_updates.get(&shutdown_res.channel_id).map(|l| l.is_empty()).unwrap_or(true) { let update_actions = peer_state.monitor_update_blocked_actions .remove(&shutdown_res.channel_id).unwrap_or(Vec::new()); @@ -4561,7 +4555,6 @@ where if !chan.context.is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } - let funding_txo = chan.context.get_funding_txo().unwrap(); let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash)); let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { @@ -4572,7 +4565,7 @@ where }, onion_packet, None, &self.fee_estimator, &&logger); match break_chan_phase_entry!(self, peer_state, send_res, chan_phase_entry) { Some(monitor_update) => { - match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { + match handle_new_monitor_update!(self, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { false => { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor @@ -6313,8 +6306,8 @@ where // monitor updating completing. let _ = self.chain_monitor.update_channel(channel_id, &update); }, - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { - self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update); + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, channel_id, update } => { + self.apply_post_close_monitor_update(counterparty_node_id, channel_id, update); }, BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -7044,7 +7037,6 @@ where let htlc_source = HTLCClaimSource { counterparty_node_id, - funding_txo: prev_hop.outpoint, channel_id: prev_hop.channel_id, htlc_id: prev_hop.htlc_id, }; @@ -7101,7 +7093,7 @@ where if let Some(raa_blocker) = raa_blocker_opt { peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker); } - handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt, + handle_new_monitor_update!(self, monitor_update, peer_state_opt, peer_state, per_peer_state, chan); } UpdateFulfillCommitFetch::DuplicateClaim {} => { @@ -7228,8 +7220,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } handle_new_monitor_update!( - self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, - counterparty_node_id, chan_id, POST_CHANNEL_CLOSE + self, preimage_update, peer_state, peer_state, per_peer_state, counterparty_node_id, + chan_id, POST_CHANNEL_CLOSE ); } @@ -7575,7 +7567,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; let remaining_in_flight = - if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(&(*funding_txo, *channel_id)) { + if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(channel_id) { pending.retain(|upd| upd.update_id > highest_applied_update_id); pending.len() } else { 0 }; @@ -8547,7 +8539,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if chan.sent_shutdown() { " after we initiated shutdown" } else { "" }); } - let funding_txo_opt = chan.context.get_funding_txo(); let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self, peer_state, chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry); dropped_htlcs = htlcs; @@ -8563,8 +8554,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!(self, monitor_update, peer_state_lock, + peer_state, per_peer_state, chan); } }, None => { @@ -8855,7 +8846,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ hash_map::Entry::Occupied(mut chan_phase_entry) => { if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - let funding_txo = chan.context.get_funding_txo(); if chan.interactive_tx_signing_session.is_some() { let monitor = try_chan_phase_entry!( @@ -8879,7 +8869,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let monitor_update_opt = try_chan_phase_entry!( self, peer_state, chan.commitment_signed(msg, &&logger), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, + handle_new_monitor_update!(self, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } } @@ -9081,10 +9071,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, peer_state, chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { - let funding_txo = funding_txo_opt - .expect("Funding outpoint must have been set for RAA handling to succeed"); - handle_new_monitor_update!(self, funding_txo, monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!(self, monitor_update, peer_state_lock, + peer_state, per_peer_state, chan); } htlcs_to_fail } else { @@ -9424,7 +9412,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .filter_map(|(chan_id, phase)| phase.as_funded_mut().map(|chan| (chan_id, chan))) { let counterparty_node_id = chan.context.get_counterparty_node_id(); - let funding_txo = chan.context.get_funding_txo(); let (monitor_opt, holding_cell_failed_htlcs) = chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None)); if !holding_cell_failed_htlcs.is_empty() { @@ -9433,8 +9420,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(monitor_update) = monitor_opt { has_monitor_update = true; - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!(self, monitor_update, peer_state_lock, + peer_state, per_peer_state, chan); continue 'peer_loop; } } @@ -10710,8 +10697,8 @@ where if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", channel_id); - handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, - peer_state_lck, peer_state, per_peer_state, chan); + handle_new_monitor_update!(self, monitor_update, peer_state_lck, + peer_state, per_peer_state, chan); if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the // top of the loop. @@ -12987,17 +12974,10 @@ where pending_claiming_payments = None; } - let mut legacy_in_flight_monitor_updates: Option>> = None; let mut in_flight_monitor_updates: Option>> = None; for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) { - for ((funding_txo, channel_id), updates) in peer_state.in_flight_monitor_updates.iter() { + for (channel_id, updates) in peer_state.in_flight_monitor_updates.iter() { if !updates.is_empty() { - if legacy_in_flight_monitor_updates.is_none() { - legacy_in_flight_monitor_updates = Some(new_hash_map()); - } - legacy_in_flight_monitor_updates.as_mut().unwrap() - .insert((counterparty_id, funding_txo), updates); - if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); } @@ -13017,7 +12997,7 @@ where (7, self.fake_scid_rand_bytes, required), (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option), (9, htlc_purposes, required_vec), - (10, legacy_in_flight_monitor_updates, option), + // 10 was previously used to write a legacy version of `in_flight_monitor_updates` (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), (14, decode_update_add_htlcs_opt, option), @@ -13303,7 +13283,7 @@ where if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } - if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update { + if let Some((counterparty_node_id, _funding_txo, channel_id, mut update)) = shutdown_result.monitor_update { // Our channel information is out of sync with the `ChannelMonitor`, so // force the update to use the `ChannelMonitor`'s update_id for the close // update. 
@@ -13317,7 +13297,7 @@ where .or_insert(latest_update_id); close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, channel_id, update + counterparty_node_id, channel_id, update }); } failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs); @@ -13431,7 +13411,6 @@ where if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() { let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, - funding_txo, channel_id, update: monitor_update, }; @@ -13544,9 +13523,7 @@ where let mut pending_claiming_payments = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); let mut events_override = None; - let mut _legacy_in_flight_monitor_updates: Option>> = None; - // We use this one over the legacy since they represent the same data, just with a different - // key. We still need to read the legacy one as it's an even TLV. + let mut _legacy_in_flight_monitor_updates: Option>> = None; let mut in_flight_monitor_updates: Option>> = None; let mut decode_update_add_htlcs: Option>> = None; let mut inbound_payment_id_secret = None; @@ -13560,7 +13537,7 @@ where (7, fake_scid_rand_bytes, option), (8, events_override, option), (9, claimable_htlc_purposes, optional_vec), - (10, _legacy_in_flight_monitor_updates, option), + (10, _legacy_in_flight_monitor_updates, (static_value, None)), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), (14, decode_update_add_htlcs, option), @@ -13617,7 +13594,6 @@ where ) => { { let mut max_in_flight_update_id = 0; $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); - let funding_txo = $monitor.get_funding_txo().0; for update in $chan_in_flight_upds.iter() { log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", update.update_id, $channel_info_log, &$monitor.channel_id()); @@ -13625,7 +13601,6 @@ where pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id: $counterparty_node_id, - funding_txo: funding_txo.clone(), channel_id: $monitor.channel_id(), update: update.clone(), }); @@ -13644,7 +13619,7 @@ where .and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v)) .or_insert(max_in_flight_update_id); } - if $peer_state.in_flight_monitor_updates.insert((funding_txo, $monitor.channel_id()), $chan_in_flight_upds).is_some() { + if $peer_state.in_flight_monitor_updates.insert($monitor.channel_id(), $chan_in_flight_upds).is_some() { log_error!($logger, "Duplicate in-flight monitor update set for the same channel!"); return Err(DecodeError::InvalidValue); } @@ -13723,18 +13698,18 @@ where pending_background_events.reserve(close_background_events.len()); 'each_bg_event: for mut new_event in close_background_events { if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, channel_id, update, + counterparty_node_id, channel_id, update, } = &mut new_event { debug_assert_eq!(update.updates.len(), 1); debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. 
})); let mut updated_id = false; for pending_event in pending_background_events.iter() { if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: pending_cp, funding_txo: pending_funding, - channel_id: pending_chan_id, update: pending_update, + counterparty_node_id: pending_cp, + channel_id: pending_chan_id, + update: pending_update, } = pending_event { let for_same_channel = counterparty_node_id == pending_cp - && funding_txo == pending_funding && channel_id == pending_chan_id; if for_same_channel { debug_assert!(update.update_id >= pending_update.update_id); @@ -13759,7 +13734,7 @@ where .or_insert(update.update_id); } let in_flight_updates = per_peer_state.in_flight_monitor_updates - .entry((*funding_txo, *channel_id)) + .entry(*channel_id) .or_insert_with(Vec::new); debug_assert!(!in_flight_updates.iter().any(|upd| upd == update)); in_flight_updates.push(update.clone());
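
After the third patch, completing a persisted update only needs the channel ID to find and prune the corresponding in-flight entries. A rough standalone sketch of that lookup-and-retain step, again using simplified stand-in types rather than the real LDK structures:

    use std::collections::BTreeMap;

    // Hypothetical stand-ins: a channel id is reduced to a byte array and an
    // update to its `update_id`.
    type ChannelId = [u8; 32];

    #[derive(Clone, Debug, PartialEq)]
    struct MonitorUpdate { update_id: u64 }

    /// Drops all in-flight updates for `channel_id` that have now been applied,
    /// returning how many remain pending.
    fn mark_updates_completed(
        in_flight: &mut BTreeMap<ChannelId, Vec<MonitorUpdate>>,
        channel_id: &ChannelId,
        highest_applied_update_id: u64,
    ) -> usize {
        if let Some(pending) = in_flight.get_mut(channel_id) {
            pending.retain(|upd| upd.update_id > highest_applied_update_id);
            pending.len()
        } else {
            0
        }
    }

    fn main() {
        let chan: ChannelId = [0x42; 32];
        let mut in_flight = BTreeMap::new();
        in_flight.insert(chan, vec![
            MonitorUpdate { update_id: 1 },
            MonitorUpdate { update_id: 2 },
            MonitorUpdate { update_id: 3 },
        ]);

        // Completing persistence through update 2 leaves only update 3 in flight.
        assert_eq!(mark_updates_completed(&mut in_flight, &chan, 2), 1);
        assert_eq!(in_flight[&chan], vec![MonitorUpdate { update_id: 3 }]);
    }
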