From 2a4259566e8d4946e5b8b0bfb58b5eb6a62db611 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 4 May 2022 16:29:29 +0000
Subject: [PATCH 1/9] Enable removal of `OnionPayload::Invoice::_legacy_hop_data` later

In fc77c57c3c6e165d26cb5c1f5d1afee0ecd02589 we stopped using the
`FinalOnionHopData` in `OnionPayload::Invoice` directly and renamed it
`_legacy_hop_data` with the intent of removing it in a few versions.
However, we continue to check that it was included in the serialized
data, meaning we would not be able to remove it without breaking the
ability to serialize full `ChannelManager`s.

This fixes that by making `_legacy_hop_data` an `Option`, which we
happily handle just fine if it's `None`.
---
 lightning/src/ln/channelmanager.rs | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 8725059c360..cf716f4853f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -164,7 +164,7 @@ enum OnionPayload {
 	Invoice {
 		/// This is only here for backwards-compatibility in serialization, in the future it can be
 		/// removed, breaking clients running 0.0.106 and earlier.
-		_legacy_hop_data: msgs::FinalOnionHopData,
+		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
 	},
 	/// Contains the payer-provided preimage.
 	Spontaneous(PaymentPreimage),
@@ -3098,7 +3098,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 						prev_funding_outpoint } => {
 					let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
 						PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
-							let _legacy_hop_data = payment_data.clone();
+							let _legacy_hop_data = Some(payment_data.clone());
 							(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
 						},
 						PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
@@ -6061,13 +6061,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {

 impl Writeable for ClaimableHTLC {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-		let payment_data = match &self.onion_payload {
-			OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
-			_ => None,
-		};
-		let keysend_preimage = match self.onion_payload {
-			OnionPayload::Invoice { .. } => None,
-			OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+		let (payment_data, keysend_preimage) = match &self.onion_payload {
+			OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
+			OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
 		};
 		write_tlv_fields!(writer, {
 			(0, self.prev_hop, required),
@@ -6108,13 +6104,13 @@ impl Readable for ClaimableHTLC {
 				OnionPayload::Spontaneous(p)
 			},
 			None => {
-				if payment_data.is_none() {
-					return Err(DecodeError::InvalidValue)
-				}
 				if total_msat.is_none() {
+					if payment_data.is_none() {
+						return Err(DecodeError::InvalidValue)
+					}
 					total_msat = Some(payment_data.as_ref().unwrap().total_msat);
 				}
-				OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
+				OnionPayload::Invoice { _legacy_hop_data: payment_data }
 			},
 		};
 		Ok(Self {

From bd1e20d49e5b67b55c22ee8927e546327d98e042 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 4 May 2022 17:49:09 +0000
Subject: [PATCH 2/9] Store an `events::PaymentPurpose` with each claimable payment

In fc77c57c3c6e165d26cb5c1f5d1afee0ecd02589 we stopped using the
`FinalOnionHopData` in `OnionPayload::Invoice` directly and intend to
remove it eventually. However, in the next few commits we need access
to the payment secret when claiming a payment, as we create a new
`PaymentPurpose` during the claim process for a new event.
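For reference, `PaymentPurpose` lives in `lightning/src/util/events.rs`
and, matching the TLV serialization added by this patch, has roughly
this shape (a sketch of the variants only; documentation omitted):

    pub enum PaymentPurpose {
        /// A payment against an invoice. `payment_preimage` is `None` when
        /// the preimage was supplied out-of-band rather than generated by
        /// LDK itself.
        InvoicePayment {
            payment_preimage: Option<PaymentPreimage>,
            payment_secret: PaymentSecret,
        },
        /// A keysend payment, carrying the payer-provided preimage directly.
        SpontaneousPayment(PaymentPreimage),
    }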
In order to get access to a `PaymentPurpose` without having access to
the `FinalOnionHopData`, we change the storage of `claimable_htlcs` to
store a single `PaymentPurpose` explicitly with each set of claimable
HTLCs.
---
 lightning/src/ln/channelmanager.rs | 88 +++++++++++++++++++++++-------
 lightning/src/util/events.rs       |  8 +++
 2 files changed, 77 insertions(+), 19 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index cf716f4853f..71ae6170aa6 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -418,7 +418,7 @@ pub(super) struct ChannelHolder<Signer: Sign> {
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the channels given here actually existing anymore by the time you
 	/// go to read them!
-	claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
+	claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
 	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
 	/// for broadcast messages, where ordering isn't as strict).
 	pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@ -3143,8 +3143,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				macro_rules! check_total_value {
 					($payment_data: expr, $payment_preimage: expr) => {{
 						let mut payment_received_generated = false;
-						let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
-							.or_insert(Vec::new());
+						let purpose = || {
+							events::PaymentPurpose::InvoicePayment {
+								payment_preimage: $payment_preimage,
+								payment_secret: $payment_data.payment_secret,
+							}
+						};
+						let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+							.or_insert_with(|| (purpose(), Vec::new()));
 						if htlcs.len() == 1 {
 							if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
 								log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
@@ -3175,10 +3181,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 							htlcs.push(claimable_htlc);
 							new_events.push(events::Event::PaymentReceived {
 								payment_hash,
-								purpose: events::PaymentPurpose::InvoicePayment {
-									payment_preimage: $payment_preimage,
-									payment_secret: $payment_data.payment_secret,
-								},
+								purpose: purpose(),
 								amt: total_value,
 							});
 							payment_received_generated = true;
@@ -3216,11 +3219,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 									OnionPayload::Spontaneous(preimage) => {
 										match channel_state.claimable_htlcs.entry(payment_hash) {
 											hash_map::Entry::Vacant(e) => {
-												e.insert(vec![claimable_htlc]);
+												let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+												e.insert((purpose.clone(), vec![claimable_htlc]));
 												new_events.push(events::Event::PaymentReceived {
 													payment_hash,
 													amt: amt_to_forward,
-													purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+													purpose,
 												});
 											},
 											hash_map::Entry::Occupied(_) => {
@@ -3459,7 +3463,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				true
 			});

-			channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+			channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
 				if htlcs.is_empty() {
 					// This should be unreachable
 					debug_assert!(false);
@@ -3503,7 +3507,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		let mut channel_state = Some(self.channel_state.lock().unwrap());
 		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
-		if let Some(mut sources) = removed_source {
+		if let Some((_, mut sources)) = removed_source {
 			for htlc in sources.drain(..)
{ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); @@ -3803,7 +3807,7 @@ impl ChannelMana let mut channel_state = Some(self.channel_state.lock().unwrap()); let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash); - if let Some(mut sources) = removed_source { + if let Some((_, mut sources)) = removed_source { assert!(!sources.is_empty()); // If we are claiming an MPP payment, we have to take special care to ensure that each @@ -5540,7 +5544,7 @@ where }); if let Some(height) = height_opt { - channel_state.claimable_htlcs.retain(|payment_hash, htlcs| { + channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { htlcs.retain(|htlc| { // If height is approaching the number of blocks we think it takes us to get // our commitment transaction confirmed before the HTLC expires, plus the @@ -6283,13 +6287,15 @@ impl Writeable f } } + let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); (channel_state.claimable_htlcs.len() as u64).write(writer)?; - for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() { + for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { payment_hash.write(writer)?; (previous_hops.len() as u64).write(writer)?; for htlc in previous_hops.iter() { htlc.write(writer)?; } + htlc_purposes.push(purpose); } let per_peer_state = self.per_peer_state.write().unwrap(); @@ -6366,6 +6372,7 @@ impl Writeable f (3, pending_outbound_payments, required), (5, self.our_network_pubkey, required), (7, self.fake_scid_rand_bytes, required), + (9, htlc_purposes, vec_type), }); Ok(()) @@ -6584,15 +6591,15 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } let claimable_htlcs_count: u64 = Readable::read(reader)?; - let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); + let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); for _ in 0..claimable_htlcs_count { let payment_hash = Readable::read(reader)?; let previous_hops_len: u64 = Readable::read(reader)?; let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::())); for _ in 0..previous_hops_len { - previous_hops.push(Readable::read(reader)?); + previous_hops.push(::read(reader)?); } - claimable_htlcs.insert(payment_hash, previous_hops); + claimable_htlcs_list.push((payment_hash, previous_hops)); } let peer_count: u64 = Readable::read(reader)?; @@ -6662,11 +6669,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let mut pending_outbound_payments = None; let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; + let mut claimable_htlc_purposes = None; read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), (3, pending_outbound_payments, option), (5, received_network_pubkey, option), (7, fake_scid_rand_bytes, option), + (9, claimable_htlc_purposes, vec_type), }); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes()); @@ -6727,6 +6736,49 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } + let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material(); + let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); + + let mut claimable_htlcs = 
HashMap::with_capacity(claimable_htlcs_list.len()); + if let Some(mut purposes) = claimable_htlc_purposes { + if purposes.len() != claimable_htlcs_list.len() { + return Err(DecodeError::InvalidValue); + } + for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) { + claimable_htlcs.insert(payment_hash, (purpose, previous_hops)); + } + } else { + // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do + // include a `_legacy_hop_data` in the `OnionPayload`. + for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) { + if previous_hops.is_empty() { + return Err(DecodeError::InvalidValue); + } + let purpose = match &previous_hops[0].onion_payload { + OnionPayload::Invoice { _legacy_hop_data } => { + if let Some(hop_data) = _legacy_hop_data { + events::PaymentPurpose::InvoicePayment { + payment_preimage: match pending_inbound_payments.get(&payment_hash) { + Some(inbound_payment) => inbound_payment.payment_preimage, + None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) { + Ok(payment_preimage) => payment_preimage, + Err(()) => { + log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0)); + return Err(DecodeError::InvalidValue); + } + } + }, + payment_secret: hop_data.payment_secret, + } + } else { return Err(DecodeError::InvalidValue); } + }, + OnionPayload::Spontaneous(payment_preimage) => + events::PaymentPurpose::SpontaneousPayment(*payment_preimage), + }; + claimable_htlcs.insert(payment_hash, (purpose, previous_hops)); + } + } + let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes()); @@ -6772,8 +6824,6 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } - let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material(); - let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index ef0c619b02e..aebdcf415c7 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -66,6 +66,14 @@ pub enum PaymentPurpose { SpontaneousPayment(PaymentPreimage), } +impl_writeable_tlv_based_enum!(PaymentPurpose, + (0, InvoicePayment) => { + (0, payment_preimage, option), + (2, payment_secret, required), + }; + (2, SpontaneousPayment) +); + #[derive(Clone, Debug, PartialEq)] /// The reason the channel was closed. See individual variants more details. pub enum ClosureReason { From 28c70ac50685b546a2fbaebea34acf1aa364cf66 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 18 Apr 2022 15:42:11 +0000 Subject: [PATCH 3/9] Ensure all HTLCs for a claimed payment are claimed on startup While the HTLC-claim process happens across all MPP parts under one lock, this doesn't imply that they are claimed fully atomically on disk. Ultimately, an application can crash after persisting one `ChannelMonitorUpdate` out of multiple monitor updates needed for the full claim. 
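In that case one `ChannelMonitor` ends up storing the preimage while the
`ChannelManager` (and possibly other monitors) still consider the payment
claimable on reload. The reconciliation this patch adds during
`ChannelManager` deserialization detects and repairs exactly that state;
simplified from the real hunk below, with logging and edge cases omitted,
it boils down to:

    // If any ChannelMonitor already knows a preimage for a payment we
    // still consider claimable, a claim was interrupted mid-persist and
    // must be completed out-of-band on startup.
    for (_, monitor) in args.channel_monitors.iter() {
        for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
            if let Some((_, htlcs)) = claimable_htlcs.remove(&payment_hash) {
                for htlc in htlcs {
                    // Queue a holding-cell claim on the still-open channel...
                    if let Some(chan) = by_id.get_mut(&htlc.prev_hop.outpoint.to_channel_id()) {
                        chan.claim_htlc_while_disconnected_dropping_mon_update(
                            htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
                    }
                    // ...and hand the preimage directly to the relevant monitor.
                    if let Some(mon) = args.channel_monitors.get(&htlc.prev_hop.outpoint) {
                        mon.provide_payment_preimage(&payment_hash, &payment_preimage,
                            &args.tx_broadcaster, &args.fee_estimator, &args.logger);
                    }
                }
            }
        }
    }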
Previously, this would leave us in a very bad state - because of the all-channels-available check in `claim_funds` we'd refuse to claim the payment again on restart (even though the `PaymentReceived` event will be passed to the user again), and we'd end up having partially claimed the payment! The fix for the consistency part of this issue is pretty straightforward - just check for this condition on startup and complete the claim across all channels/`ChannelMonitor`s if we detect it. This still leaves us in a confused state from the perspective of the user, however - we've actually claimed a payment but when they call `claim_funds` we return `false` indicating it could not be claimed. --- lightning/src/chain/channelmonitor.rs | 7 +- lightning/src/ln/channel.rs | 26 ++++ lightning/src/ln/channelmanager.rs | 34 +++- lightning/src/ln/functional_test_utils.rs | 10 +- lightning/src/ln/functional_tests.rs | 180 ++++++++++++++++++++++ 5 files changed, 252 insertions(+), 5 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 738fff3837c..fd66e585208 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1085,7 +1085,8 @@ impl ChannelMonitor { self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs).map_err(|_| ()) } - #[cfg(test)] + /// This is used to provide payment preimage(s) out-of-band during startup without updating the + /// off-chain state with a new commitment transaction. pub(crate) fn provide_payment_preimage( &self, payment_hash: &PaymentHash, @@ -1631,6 +1632,10 @@ impl ChannelMonitor { res } + + pub(crate) fn get_stored_preimages(&self) -> HashMap { + self.inner.lock().unwrap().payment_preimages.clone() + } } /// Compares a broadcasted commitment transaction's HTLCs with those in the latest state, diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 43032c51a3c..1d204d18a17 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1703,6 +1703,28 @@ impl Channel { make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) } + /// Claims an HTLC while we're disconnected from a peer, dropping the ChannelMonitorUpdate + /// entirely. + /// + /// The ChannelMonitor for this channel MUST be updated out-of-band with the preimage provided + /// (i.e. without calling [`crate::chain::Watch::update_channel`]). + /// + /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is + /// disconnected). + pub fn claim_htlc_while_disconnected_dropping_mon_update + (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) + where L::Target: Logger { + // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` + // (see equivalent if condition there). + assert!(self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) != 0); + let mon_update_id = self.latest_monitor_update_id; // Forget the ChannelMonitor update + let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger); + self.latest_monitor_update_id = mon_update_id; + if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp { + assert!(msg.is_none()); // The HTLC must have ended up in the holding cell. 
+ } + } + fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger { // Either ChannelFunded got set (which means it won't be unset) or there is no way any // caller thought we could have something claimed (cause we wouldn't have accepted in an @@ -1765,6 +1787,10 @@ impl Channel { }; if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 { + // Note that this condition is the same as the assertion in + // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly - + // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we + // do not not get into this branch. for pending_update in self.holding_cell_htlc_updates.iter() { match pending_update { &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 71ae6170aa6..a62f44806d4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6698,7 +6698,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> // payments which are still in-flight via their on-chain state. // We only rebuild the pending payments map if we were most recently serialized by // 0.0.102+ - for (_, monitor) in args.channel_monitors { + for (_, monitor) in args.channel_monitors.iter() { if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() { if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source { @@ -6824,6 +6824,38 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } + for (_, monitor) in args.channel_monitors.iter() { + for (payment_hash, payment_preimage) in monitor.get_stored_preimages() { + if let Some(claimable_htlcs) = claimable_htlcs.remove(&payment_hash) { + log_info!(args.logger, "Re-claimaing HTLCs with payment hash {} due to partial-claim.", log_bytes!(payment_hash.0)); + for claimable_htlc in claimable_htlcs.1 { + // Add a holding-cell claim of the payment to the Channel, which should be + // applied ~immediately on peer reconnection. Because it won't generate a + // new commitment transaction we can just provide the payment preimage to + // the corresponding ChannelMonitor and nothing else. + // + // We do so directly instead of via the normal ChannelMonitor update + // procedure as the ChainMonitor hasn't yet been initialized, implying + // we're not allowed to call it directly yet. Further, we do the update + // without incrementing the ChannelMonitor update ID as there isn't any + // reason to. + // If we were to generate a new ChannelMonitor update ID here and then + // crash before the user finishes block connect we'd end up force-closing + // this channel as well. On the flip side, there's no harm in restarting + // without the new monitor persisted - we'll end up right back here on + // restart. 
+ let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id(); + if let Some(channel) = by_id.get_mut(&previous_channel_id) { + channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger); + } + if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) { + previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger); + } + } + } + } + } + let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c0e33e5b93a..15e75db8979 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1476,7 +1476,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, payment_id } -pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option) { +pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, clear_recipient_events: bool, expected_preimage: Option) { let mut payment_event = SendEvent::from_event(ev); let mut prev_node = origin_node; @@ -1489,7 +1489,7 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path expect_pending_htlcs_forwardable!(node); - if idx == expected_path.len() - 1 { + if idx == expected_path.len() - 1 && clear_recipient_events { let events_2 = node.node.get_and_clear_pending_events(); if payment_received_expected { assert_eq!(events_2.len(), 1); @@ -1513,7 +1513,7 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path } else { assert!(events_2.is_empty()); } - } else { + } else if idx != expected_path.len() - 1 { let mut events_2 = node.node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); check_added_monitors!(node, 1); @@ -1525,6 +1525,10 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path } } +pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option) { + do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_received_expected, true, expected_preimage); +} + pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) { let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_route.len()); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 48b4b07c7d7..e840bef4aac 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9843,6 +9843,186 @@ fn test_keysend_payments_to_private_node() { claim_payment(&nodes[0], &path, test_preimage); } +fn do_test_partial_claim_before_restart(persist_both_monitors: 
bool) { + // Test what happens if a node receives an MPP payment, claims it, but crashes before + // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only + // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still + // have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the + // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does + // not have the preimage tied to the still-pending HTLC. + // + // To get to the correct state, on startup we should propagate the preimage to the + // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor + // receiving the preimage without a state update. + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + + let persister: test_utils::TestPersister; + let new_chain_monitor: test_utils::TestChainMonitor; + let nodes_3_deserialized: ChannelManager; + + let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known()); + let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2; + let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2; + + // Create an MPP route for 15k sats, more than the default htlc-max of 10% + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); + assert_eq!(route.paths.len(), 2); + route.paths.sort_by(|path_a, _| { + // Sort the path so that the path through nodes[1] comes first + if path_a[0].pubkey == nodes[1].node.get_our_node_id() { + core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } + }); + + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 2); + + // Send the payment through to nodes[3] *without* clearing the PaymentReceived event + let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(send_events.len(), 2); + do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None); + do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None); + + // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s + // monitors and ChannelManager, for use later, if we don't want to persist both monitors. 
+ let mut original_monitor = test_utils::TestVecWriter(Vec::new()); + if !persist_both_monitors { + for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() { + if outpoint.to_channel_id() == chan_id_not_persisted { + assert!(original_monitor.0.is_empty()); + nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap(); + } + } + } + + let mut original_manager = test_utils::TestVecWriter(Vec::new()); + nodes[3].node.write(&mut original_manager).unwrap(); + + expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000); + + nodes[3].node.claim_funds(payment_preimage); + check_added_monitors!(nodes[3], 2); + + // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we + // crashed in between the two persistence calls - using one old ChannelMonitor and one new one, + // with the old ChannelManager. + let mut updated_monitor = test_utils::TestVecWriter(Vec::new()); + for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() { + if outpoint.to_channel_id() == chan_id_persisted { + assert!(updated_monitor.0.is_empty()); + nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap(); + } + } + // If `persist_both_monitors` is set, get the second monitor here as well + if persist_both_monitors { + for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() { + if outpoint.to_channel_id() == chan_id_not_persisted { + assert!(original_monitor.0.is_empty()); + nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap(); + } + } + } + + // Now restart nodes[3]. + persister = test_utils::TestPersister::new(); + let keys_manager = &chanmon_cfgs[3].keys_manager; + new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[3].chain_source), nodes[3].tx_broadcaster.clone(), nodes[3].logger, node_cfgs[3].fee_estimator, &persister, keys_manager); + nodes[3].chain_monitor = &new_chain_monitor; + let mut monitors = Vec::new(); + for mut monitor_data in [original_monitor, updated_monitor].iter() { + let (_, mut deserialized_monitor) = <(BlockHash, ChannelMonitor)>::read(&mut &monitor_data.0[..], keys_manager).unwrap(); + monitors.push(deserialized_monitor); + } + + let config = UserConfig::default(); + nodes_3_deserialized = { + let mut channel_monitors = HashMap::new(); + for monitor in monitors.iter_mut() { + channel_monitors.insert(monitor.get_funding_txo().0, monitor); + } + <(BlockHash, ChannelManager)>::read(&mut &original_manager.0[..], ChannelManagerReadArgs { + default_config: config, + keys_manager, + fee_estimator: node_cfgs[3].fee_estimator, + chain_monitor: nodes[3].chain_monitor, + tx_broadcaster: nodes[3].tx_broadcaster.clone(), + logger: nodes[3].logger, + channel_monitors, + }).unwrap().1 + }; + nodes[3].node = &nodes_3_deserialized; + + for monitor in monitors { + // On startup the preimage should have been copied into the non-persisted monitor: + assert!(monitor.get_stored_preimages().contains_key(&payment_hash)); + nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap(); + } + check_added_monitors!(nodes[3], 2); + + nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false); + nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false); + + // During deserialization, we should have closed one channel and broadcast its latest + // commitment transaction. 
We should also still have the original PaymentReceived event we + // never finished processing. + let events = nodes[3].node.get_and_clear_pending_events(); + assert_eq!(events.len(), if persist_both_monitors { 3 } else { 2 }); + if let Event::PaymentReceived { amt: 15_000_000, .. } = events[0] { } else { panic!(); } + if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); } + if persist_both_monitors { + if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); } + } + + assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 }); + if !persist_both_monitors { + // If one of the two channels is still live, reveal the payment preimage over it. + + nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]); + nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]); + + nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]); + get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id()); + assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]); + + // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC + // claim should fly. + let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events(); + check_added_monitors!(nodes[3], 1); + assert_eq!(ds_msgs.len(), 2); + if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); } + + let cs_updates = match ds_msgs[0] { + MessageSendEvent::UpdateHTLCs { ref updates, .. } => { + nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + check_added_monitors!(nodes[2], 1); + let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); + expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); + commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true); + cs_updates + } + _ => panic!(), + }; + + nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true); + expect_payment_sent!(nodes[0], payment_preimage); + } +} + +#[test] +fn test_partial_claim_before_restart() { + do_test_partial_claim_before_restart(false); + do_test_partial_claim_before_restart(true); +} + /// The possible events which may trigger a `max_dust_htlc_exposure` breach #[derive(Clone, Copy, PartialEq)] enum ExposureEvent { From 0a2a40c4fd05f39b9485cc0f000f7066636783cb Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 18 Apr 2022 20:12:15 +0000 Subject: [PATCH 4/9] Add a `PaymentClaimed` event to indicate a payment was claimed This replaces the return value of `claim_funds` with an event. It does not yet change behavior in any material way. 
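From the API side, callers now wait for the event instead of checking a
return value. A sketch of consumer-side handling follows; note that
`mark_invoice_paid` is a hypothetical application hook, not an LDK API:

    // claim_funds() no longer returns a bool; the payment is only settled
    // once Event::PaymentClaimed is delivered to your event handler.
    channel_manager.claim_funds(payment_preimage);

    // Later, in the application's EventHandler:
    match event {
        Event::PaymentClaimed { payment_hash, amt, purpose } => {
            // `amt` is the total amount claimed, in millisatoshis, across
            // all HTLCs (including all parts of an MPP payment).
            mark_invoice_paid(payment_hash, amt, purpose); // hypothetical hook
        },
        _ => { /* handle other events */ },
    }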
--- fuzz/src/chanmon_consistency.rs | 3 +- lightning/src/chain/chainmonitor.rs | 11 ++- lightning/src/ln/chanmon_update_fail_tests.rs | 57 ++++++++---- lightning/src/ln/channelmanager.rs | 36 +++++--- lightning/src/ln/functional_test_utils.rs | 34 ++++++- lightning/src/ln/functional_tests.rs | 92 ++++++++++++------- lightning/src/ln/monitor_tests.rs | 10 +- lightning/src/ln/payment_tests.rs | 11 ++- lightning/src/ln/reorg_tests.rs | 10 +- lightning/src/ln/shutdown_tests.rs | 14 ++- lightning/src/util/events.rs | 56 ++++++++++- 11 files changed, 248 insertions(+), 86 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 1b662c0e721..2fe61aac42f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -842,11 +842,12 @@ pub fn do_test(data: &[u8], underlying_out: Out) { if $fail { assert!(nodes[$node].fail_htlc_backwards(&payment_hash)); } else { - assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0))); + nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)); } } }, events::Event::PaymentSent { .. } => {}, + events::Event::PaymentClaimed { .. } => {}, events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, events::Event::PaymentForwarded { .. } if $node == 1 => {}, diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 503e6bdee06..e6b5733520a 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -731,7 +731,7 @@ impl even mod tests { use bitcoin::BlockHeader; use ::{check_added_monitors, check_closed_broadcast, check_closed_event}; - use ::{expect_payment_sent, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg}; + use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg}; use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err}; use chain::{ChannelMonitorUpdateErr, Confirm, Watch}; use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS; @@ -798,16 +798,18 @@ mod tests { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); // Route two payments to be claimed at the same time. - let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0; - let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0; + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear(); chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); nodes[1].node.claim_funds(payment_preimage_2); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(Ok(())); @@ -877,8 +879,9 @@ mod tests { let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); // First route a payment that we will claim on chain and give the recipient the preimage. 
- let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); nodes[1].node.get_and_clear_pending_msg_events(); check_added_monitors!(nodes[1], 1); let remote_txn = get_local_commitment_txn!(nodes[1], channel.2); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 91688cc4fa3..52441629aa0 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -89,7 +89,7 @@ fn test_monitor_and_persister_update_fail() { send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); // Route an HTLC from node 0 to node 1 (but don't settle) - let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0; + let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); // Make a copy of the ChainMonitor so we can capture the error it returns on a // bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor @@ -123,8 +123,10 @@ fn test_monitor_and_persister_update_fail() { persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // Try to update ChannelMonitor - assert!(nodes[1].node.claim_funds(preimage)); + nodes[1].node.claim_funds(preimage); + expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); check_added_monitors!(nodes[1], 1); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); @@ -267,7 +269,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now try to send a second payment which will fail to send let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); @@ -283,8 +285,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] // but nodes[0] won't respond since it is frozen. 
- assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); + let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] { @@ -1088,13 +1092,15 @@ fn test_monitor_update_fail_reestablish() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - assert!(nodes[2].node.claim_funds(payment_preimage)); + nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -1292,13 +1298,14 @@ fn claim_while_disconnected_monitor_update_fail() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; // Forward a payment for B to claim - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); @@ -1578,10 +1585,11 @@ fn test_monitor_update_fail_claim() { // Rebalance a bit so that we can send backwards from 3 to 2. 
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].node.claim_funds(payment_preimage_1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); @@ -1754,7 +1762,7 @@ fn monitor_update_claim_fail_no_response() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; // Forward a payment for B to claim - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); @@ -1770,8 +1778,10 @@ fn monitor_update_claim_fail_no_response() { let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true); chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].node.claim_funds(payment_preimage_1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); @@ -2076,13 +2086,15 @@ fn test_fail_htlc_on_broadcast_after_claim() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2; - let payment_preimage = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000); let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2); assert_eq!(bs_txn.len(), 1); nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 2000); + let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2235,7 +2247,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // // Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c) // will not be freed from the holding cell. 
- let (payment_preimage_0, _, _) = route_payment(&nodes[1], &[&nodes[0]], 100000); + let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000); nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -2246,8 +2258,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { check_added_monitors!(nodes[0], 0); chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - assert!(nodes[0].node.claim_funds(payment_preimage_0)); + nodes[0].node.claim_funds(payment_preimage_0); check_added_monitors!(nodes[0], 1); + expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg); @@ -2460,8 +2473,10 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); } else { - assert!(nodes[2].node.claim_funds(payment_preimage)); + nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 100_000); + let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1); // Check that the message we're about to deliver matches the one generated: @@ -2630,20 +2645,22 @@ fn double_temp_error() { let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // `claim_funds` results in a ChannelMonitorUpdate. - assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. 
- assert!(nodes[1].node.claim_funds(payment_preimage_2)); + nodes[1].node.claim_funds(payment_preimage_2); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(Ok(())); let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a62f44806d4..98714252f5e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3788,26 +3788,29 @@ impl ChannelMana /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any /// [`MessageSendEvent`]s needed to claim the payment. /// + /// Note that calling this method does *not* guarantee that the payment has been claimed. You + /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be + /// provided to your [`EventHandler`] when [`process_pending_events`] is next called. + /// /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived` /// event matches your expectation. If you fail to do so and call this method, you may provide /// the sender "proof-of-payment" when they did not fulfill the full expected payment. /// - /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now - /// pending for processing via [`get_and_clear_pending_msg_events`]. - /// /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived + /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed + /// [`process_pending_events`]: EventsProvider::process_pending_events /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events - pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool { + pub fn claim_funds(&self, payment_preimage: PaymentPreimage) { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let mut channel_state = Some(self.channel_state.lock().unwrap()); let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash); - if let Some((_, mut sources)) = removed_source { + if let Some((payment_purpose, mut sources)) = removed_source { assert!(!sources.is_empty()); // If we are claiming an MPP payment, we have to take special care to ensure that each @@ -3821,12 +3824,14 @@ impl ChannelMana // we got all the HTLCs and then a channel closed while we were waiting for the user to // provide the preimage, so worrying too much about the optimal handling isn't worth // it. 
+ let mut claimable_amt_msat = 0; let mut valid_mpp = true; for htlc in sources.iter() { if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) { valid_mpp = false; break; } + claimable_amt_msat += htlc.value; } let mut errs = Vec::new(); @@ -3862,6 +3867,14 @@ impl ChannelMana } } + if claimed_any_htlcs { + self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed { + payment_hash, + purpose: payment_purpose, + amt: claimable_amt_msat, + }); + } + // Now that we've done the entire above loop in one lock, we can handle any errors // which were generated. channel_state.take(); @@ -3870,9 +3883,7 @@ impl ChannelMana let res: Result<(), _> = Err(err); let _ = handle_error!(self, res, counterparty_node_id); } - - claimed_any_htlcs - } else { false } + } } fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { @@ -6827,7 +6838,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> for (_, monitor) in args.channel_monitors.iter() { for (payment_hash, payment_preimage) in monitor.get_stored_preimages() { if let Some(claimable_htlcs) = claimable_htlcs.remove(&payment_hash) { - log_info!(args.logger, "Re-claimaing HTLCs with payment hash {} due to partial-claim.", log_bytes!(payment_hash.0)); + log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0)); for claimable_htlc in claimable_htlcs.1 { // Add a holding-cell claim of the payment to the Channel, which should be // applied ~immediately on peer reconnection. Because it won't generate a @@ -7109,8 +7120,10 @@ mod tests { // claim_funds_along_route because the ordering of the messages causes the second half of the // payment to be put in the holding cell, which confuses the test utilities. So we exchange the // lightning messages manually. - assert!(nodes[1].node.claim_funds(payment_preimage)); + nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], our_payment_hash, 200_000); check_added_monitors!(nodes[1], 2); + let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); @@ -7556,7 +7569,8 @@ pub mod bench { expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b }); expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000); - assert!($node_b.claim_funds(payment_preimage)); + $node_b.claim_funds(payment_preimage); + expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000); match $node_b.get_and_clear_pending_msg_events().pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id, updates } => { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 15e75db8979..458601542ca 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -34,6 +34,8 @@ use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::network::constants::Network; use bitcoin::hash_types::BlockHash; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash as _; use bitcoin::secp256k1::PublicKey; @@ -1266,6 +1268,22 @@ macro_rules! 
expect_payment_received { } } +#[macro_export] +#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))] +macro_rules! expect_payment_claimed { + ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => { + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + $crate::util::events::Event::PaymentClaimed { ref payment_hash, amt, .. } => { + assert_eq!($expected_payment_hash, *payment_hash); + assert_eq!($expected_recv_value, amt); + }, + _ => panic!("Unexpected event"), + } + } +} + #[cfg(test)] #[macro_export] macro_rules! expect_payment_sent_without_paths { @@ -1550,7 +1568,19 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, for path in expected_paths.iter() { assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } - assert!(expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage)); + expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage); + + let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events(); + assert_eq!(claim_event.len(), 1); + match claim_event[0] { + Event::PaymentClaimed { purpose: PaymentPurpose::SpontaneousPayment(preimage), .. }| + Event::PaymentClaimed { purpose: PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), ..}, .. } => + assert_eq!(preimage, our_payment_preimage), + Event::PaymentClaimed { purpose: PaymentPurpose::InvoicePayment { .. }, payment_hash, .. } => + assert_eq!(&payment_hash.0, &Sha256::hash(&our_payment_preimage.0)[..]), + _ => panic!(), + } + check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); let mut expected_total_fee_msat = 0; @@ -1645,7 +1675,7 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, } // Ensure that claim_funds is idempotent. - assert!(!expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage)); + expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(expected_paths[0].last().unwrap(), 0); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e840bef4aac..ed16a1c025b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1259,6 +1259,7 @@ fn test_duplicate_htlc_different_direction_onchain() { // Provide preimage to node 0 by claiming payment nodes[0].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[0], payment_hash, 800_000); check_added_monitors!(nodes[0], 1); // Broadcast node 1 commitment txn @@ -2031,8 +2032,9 @@ fn channel_reserve_in_flight_removes() { let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2); // Route the first two HTLCs. - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000); - let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 20000); + let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000); // Start routing the third HTLC (this is just used to get everyone in the right state). 
let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); @@ -2046,13 +2048,15 @@ fn channel_reserve_in_flight_removes() { // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an // initial fulfill/CS. - assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].node.claim_funds(payment_preimage_1); + expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); check_added_monitors!(nodes[1], 1); let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not // remove the second HTLC when we send the HTLC back from B to A. - assert!(nodes[1].node.claim_funds(payment_preimage_2)); + nodes[1].node.claim_funds(payment_preimage_2); + expect_payment_claimed!(nodes[1], payment_hash_2, 20_000); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2195,7 +2199,7 @@ fn channel_monitor_network_test() { check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); // One pending HTLC is discarded by the force-close: - let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0; + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000); // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not // broadcasted until we reach the timelock time). @@ -2217,9 +2221,10 @@ fn channel_monitor_network_test() { check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); macro_rules! claim_funds { - ($node: expr, $prev_node: expr, $preimage: expr) => { + ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => { { - assert!($node.node.claim_funds($preimage)); + $node.node.claim_funds($preimage); + expect_payment_claimed!($node, $payment_hash, 3_000_000); check_added_monitors!($node, 1); let events = $node.node.get_and_clear_pending_msg_events(); @@ -2249,7 +2254,7 @@ fn channel_monitor_network_test() { node2_commitment_txid = node_txn[0].txid(); // Claim the payment on nodes[3], giving it knowledge of the preimage - claim_funds!(nodes[3], nodes[2], payment_preimage_1); + claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1); mine_transaction(&nodes[3], &node_txn[0]); check_added_monitors!(nodes[3], 1); check_preimage_claim(&nodes[3], &node_txn); @@ -2265,7 +2270,7 @@ fn channel_monitor_network_test() { let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 }); // One pending HTLC to time out: - let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0; + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000); // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for // buffer space). 
@@ -2300,7 +2305,7 @@ fn channel_monitor_network_test() { let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT); // Claim the payment on nodes[4], giving it knowledge of the preimage - claim_funds!(nodes[4], nodes[3], payment_preimage_2); + claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2); connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); let events = nodes[4].node.get_and_clear_pending_msg_events(); @@ -2655,8 +2660,8 @@ fn test_htlc_on_chain_success() { send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); - let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); // Broadcast legit commitment tx from C on B's chain // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain @@ -2664,7 +2669,9 @@ fn test_htlc_on_chain_success() { assert_eq!(commitment_tx.len(), 1); check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.claim_funds(our_payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000); nodes[2].node.claim_funds(our_payment_preimage_2); + expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000); check_added_monitors!(nodes[2], 2); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3476,9 +3483,10 @@ fn test_dup_events_on_peer_disconnect() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1000000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - assert!(nodes[1].node.claim_funds(payment_preimage)); + nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); check_added_monitors!(nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); @@ -3612,7 +3620,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); } - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); let payment_event = { nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); @@ -3717,6 +3725,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let events_3 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); @@ 
-4054,7 +4063,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now try to send a second payment which will fail to send let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); @@ -4068,7 +4077,8 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { _ => panic!("Unexpected event"), } - assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].node.claim_funds(payment_preimage_1); + expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -4844,14 +4854,15 @@ fn test_static_spendable_outputs_preimage_tx() { // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000); let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(commitment_tx[0].input.len(), 1); assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid()); // Settle A's commitment tx on B's chain - assert!(nodes[1].node.claim_funds(payment_preimage)); + nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); check_added_monitors!(nodes[1], 1); mine_transaction(&nodes[1], &commitment_tx[0]); check_added_monitors!(nodes[1], 1); @@ -5144,10 +5155,11 @@ fn test_onchain_to_onchain_claim() { send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2); check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -5262,7 +5274,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1); - let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000); + let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000); let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200).unwrap(); // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte @@ -5305,6 +5317,8 @@ fn test_duplicate_payment_hash_one_failure_one_success() { } nodes[2].node.claim_funds(our_payment_preimage); + 
expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000); + mine_transaction(&nodes[2], &commitment_txn[0]); check_added_monitors!(nodes[2], 2); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); @@ -5385,7 +5399,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2); assert_eq!(local_txn.len(), 1); assert_eq!(local_txn[0].input.len(), 1); @@ -5393,7 +5407,9 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); check_added_monitors!(nodes[1], 1); + mine_transaction(&nodes[1], &local_txn[0]); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); @@ -5868,12 +5884,13 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 }); // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being // present in B's local commitment transaction, but none of A's commitment transactions. 
- assert!(nodes[1].node.claim_funds(payment_preimage)); + nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 }); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); @@ -6307,6 +6324,8 @@ fn test_free_and_fail_holding_cell_htlcs() { } nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_1, amt_1); + let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); @@ -6895,10 +6914,11 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0; + let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[1].node.claim_funds(our_payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -6937,10 +6957,11 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0; + let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[1].node.claim_funds(our_payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -7927,7 +7948,7 @@ fn test_bump_penalty_txn_on_remote_commitment() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known()); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000); route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC @@ -7938,6 +7959,7 @@ fn test_bump_penalty_txn_on_remote_commitment() { // Claim a HTLC without revocation (provide B monitor with preimage) nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); mine_transaction(&nodes[1], &remote_txn[0]); check_added_monitors!(nodes[1], 2); connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires @@ -8113,8 +8135,9 @@ fn test_pending_claimed_htlc_no_balance_underflow() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known()); - let payment_preimage = 
route_payment(&nodes[0], &[&nodes[1]], 1_010_000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_010_000); nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, 1_010_000); check_added_monitors!(nodes[1], 1); let fulfill_ev = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -8696,7 +8719,7 @@ fn test_update_err_monitor_lockdown() { send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); // Route a HTLC from node 0 to node 1 (but don't settle) - let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0; + let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); // Copy ChainMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain let chain_source = test_utils::TestChainSource::new(Network::Testnet); @@ -8720,8 +8743,10 @@ fn test_update_err_monitor_lockdown() { watchtower.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200); // Try to update ChannelMonitor - assert!(nodes[1].node.claim_funds(preimage)); + nodes[1].node.claim_funds(preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); @@ -8964,7 +8989,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain // Steps (1) and (2): // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back. - let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3_000_000); + let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); // Check that Alice's commitment transaction now contains an output for this HTLC. let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2); @@ -9009,8 +9034,10 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain // Step (5): // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the // process of removing the HTLC from their commitment transactions. 
- assert!(nodes[2].node.claim_funds(payment_preimage)); + nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(carol_updates.update_add_htlcs.is_empty()); assert!(carol_updates.update_fail_htlcs.is_empty()); @@ -9906,6 +9933,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { nodes[3].node.claim_funds(payment_preimage); check_added_monitors!(nodes[3], 2); + expect_payment_claimed!(nodes[3], payment_hash, 15_000_000); // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we // crashed in between the two persistence calls - using one old ChannelMonitor and one new one, diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index af42644dd71..489626a90f2 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -203,7 +203,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { assert_eq!(funding_outpoint.to_channel_id(), chan_id); // This HTLC is immediately claimed, giving node B the preimage - let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000); // This HTLC is allowed to time out, letting A claim it. However, in order to test claimable // balances more fully we also give B the preimage for this HTLC. let (timeout_payment_preimage, timeout_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 4_000_000); @@ -236,13 +236,18 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); + let b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); // We claim the dust payment here as well, but it won't impact our claimable balances as its // dust and thus doesn't appear on chain at all. nodes[1].node.claim_funds(dust_payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], dust_payment_hash, 3_000); + nodes[1].node.claim_funds(timeout_payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], timeout_payment_hash, 4_000_000); if prev_commitment_tx { // To build a previous commitment transaction, deliver one round of commitment messages. @@ -580,9 +585,10 @@ fn test_balances_on_local_commitment_htlcs() { expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000); - assert!(nodes[1].node.claim_funds(payment_preimage_2)); + nodes[1].node.claim_funds(payment_preimage_2); get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000); let chan_feerate = get_feerate!(nodes[0], chan_id) as u64; let opt_anchors = get_opt_anchors!(nodes[0], chan_id); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 2d57950029e..4fe3cef0d57 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -376,7 +376,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time // out and retry. 
let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); - let (payment_preimage_1, _, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); + let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -475,6 +475,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // we close in a moment. nodes[2].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); @@ -564,7 +566,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co // Route a payment, but force-close the channel before the HTLC fulfill message arrives at // nodes[0]. - let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000); + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000); nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -582,8 +584,9 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co check_spends!(node_txn[2], node_txn[1]); let timeout_txn = vec![node_txn[2].clone()]; - assert!(nodes[1].node.claim_funds(payment_preimage)); + nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash, 10_000_000); let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]}); @@ -740,6 +743,8 @@ fn test_fulfill_restart_failure() { nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash, 100_000); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); expect_payment_sent_without_paths!(nodes[0], payment_preimage); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 8a4ec2dc351..d32dae22e8f 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -60,10 +60,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); - let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); // Provide preimage to node 2 by claiming payment nodes[2].node.claim_funds(our_payment_preimage); + expect_payment_claimed!(nodes[2], 
our_payment_hash, 1_000_000); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -348,8 +349,8 @@ fn test_set_outpoints_partial_claiming() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known()); - let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0; - let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0; + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000); + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000); // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC let remote_txn = get_local_commitment_txn!(nodes[1], chan.2); @@ -363,9 +364,10 @@ fn test_set_outpoints_partial_claiming() { // Connect blocks on node A to advance height towards TEST_FINAL_CLTV // Provide node A with both preimage nodes[0].node.claim_funds(payment_preimage_1); + expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000); nodes[0].node.claim_funds(payment_preimage_2); + expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000); check_added_monitors!(nodes[0], 2); - nodes[0].node.get_and_clear_pending_events(); nodes[0].node.get_and_clear_pending_msg_events(); // Connect blocks on node A commitment transaction diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 063fa016086..58d505943a6 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -81,7 +81,7 @@ fn updates_shutdown_wait() { let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet); let random_seed_bytes = keys_manager.get_secure_random_bytes(); - let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000); nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); @@ -101,8 +101,10 @@ fn updates_shutdown_wait() { unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {}); - assert!(nodes[2].node.claim_funds(payment_preimage)); + nodes[2].node.claim_funds(payment_preimage_0); check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -122,7 +124,7 @@ fn updates_shutdown_wait() { assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent!(nodes[0], payment_preimage_0); let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); 
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); @@ -230,7 +232,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000); nodes[1].node.close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap(); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); @@ -270,8 +272,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[2].node.claim_funds(payment_preimage)); + nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 100_000); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index aebdcf415c7..5c00a05c370 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -188,8 +188,9 @@ pub enum Event { /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel user_channel_id: u64, }, - /// Indicates we've received money! Just gotta dig out that payment preimage and feed it to - /// [`ChannelManager::claim_funds`] to get it.... + /// Indicates we've received (an offer of) money! Just gotta dig out that payment preimage and + /// feed it to [`ChannelManager::claim_funds`] to get it.... + /// /// Note that if the preimage is not known, you should call /// [`ChannelManager::fail_htlc_backwards`] to free up resources for this HTLC and avoid /// network congestion. @@ -213,6 +214,30 @@ pub enum Event { /// payment is to pay an invoice or to send a spontaneous payment. purpose: PaymentPurpose, }, + /// Indicates a payment has been claimed and we've received money! + /// + /// This most likely occurs when [`ChannelManager::claim_funds`] has been called in response + /// to an [`Event::PaymentReceived`]. However, if we previously crashed during a + /// [`ChannelManager::claim_funds`] call you may see this event without a corresponding + /// [`Event::PaymentReceived`] event. + /// + /// # Note + /// LDK will not stop an inbound payment from being paid multiple times, so multiple + /// `PaymentReceived` events may be generated for the same payment. If you then call + /// [`ChannelManager::claim_funds`] twice for the same [`Event::PaymentReceived`] you may get + /// multiple `PaymentClaimed` events. + /// + /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds + PaymentClaimed { + /// The payment hash of the claimed payment. Note that LDK will not stop you from + /// registering duplicate payment hashes for inbound payments. + payment_hash: PaymentHash, + /// The value, in thousandths of a satoshi, that this payment is for. + amt: u64, + /// The purpose of this claimed payment, i.e. whether the payment was for an invoice or a + /// spontaneous payment. 
+ purpose: PaymentPurpose, + }, /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target /// and we got back the payment preimage for it). /// @@ -592,6 +617,14 @@ impl Writeable for Event { // We never write the OpenChannelRequest events as, upon disconnection, peers // drop any channels which have not yet exchanged funding_signed. }, + &Event::PaymentClaimed { ref payment_hash, ref amt, ref purpose } => { + 19u8.write(writer)?; + write_tlv_fields!(writer, { + (0, payment_hash, required), + (2, purpose, required), + (4, amt, required), + }); + }, // Note that, going forward, all new events must only write data inside of // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write // data via `write_tlv_fields`. @@ -792,6 +825,25 @@ impl MaybeReadable for Event { // Value 17 is used for `Event::OpenChannelRequest`. Ok(None) }, + 19u8 => { + let f = || { + let mut payment_hash = PaymentHash([0; 32]); + let mut purpose = None; + let mut amt = 0; + read_tlv_fields!(reader, { + (0, payment_hash, required), + (2, purpose, ignorable), + (4, amt, required), + }); + if purpose.is_none() { return Ok(None); } + Ok(Some(Event::PaymentClaimed { + payment_hash, + purpose: purpose.unwrap(), + amt, + })) + }; + f() + }, // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue. // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt // reads. From 0e2542176b44519733f912de2973ed46915d2251 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 4 May 2022 18:12:09 +0000 Subject: [PATCH 5/9] Provide a redundant `Event::PaymentClaimed` on restart if needed If we crashed during a payment claim and then detected a partial claim on restart, we should ensure the user is aware that the payment has been claimed. We do so here by using the new partial-claim detection logic to create a `PaymentClaimed` event. --- lightning/src/ln/channelmanager.rs | 12 ++++++++++-- lightning/src/ln/functional_tests.rs | 11 ++++++++++- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 98714252f5e..03e07671360 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6837,9 +6837,12 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> for (_, monitor) in args.channel_monitors.iter() { for (payment_hash, payment_preimage) in monitor.get_stored_preimages() { - if let Some(claimable_htlcs) = claimable_htlcs.remove(&payment_hash) { + if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) { log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0)); - for claimable_htlc in claimable_htlcs.1 { + let mut claimable_amt_msat = 0; + for claimable_htlc in claimable_htlcs { + claimable_amt_msat += claimable_htlc.value; + // Add a holding-cell claim of the payment to the Channel, which should be // applied ~immediately on peer reconnection. 
Because it won't generate a // new commitment transaction we can just provide the payment preimage to @@ -6863,6 +6866,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger); } } + pending_events_read.push(events::Event::PaymentClaimed { + payment_hash, + purpose: payment_purpose, + amt: claimable_amt_msat, + }); } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index ed16a1c025b..d033f85f6f7 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9881,6 +9881,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // To get to the correct state, on startup we should propagate the preimage to the // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor // receiving the preimage without a state update. + // + // Further, we should generate a `PaymentClaimed` event to inform the user that the payment was + // definitely claimed. let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); @@ -9998,13 +10001,19 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // commitment transaction. We should also still have the original PaymentReceived event we // never finished processing. let events = nodes[3].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if persist_both_monitors { 3 } else { 2 }); + assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 }); if let Event::PaymentReceived { amt: 15_000_000, .. } = events[0] { } else { panic!(); } if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); } if persist_both_monitors { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); } } + // On restart, we should also get a duplicate PaymentClaimed event as we persisted the + // ChannelManager prior to handling the original one. + if let Event::PaymentClaimed { payment_hash: our_payment_hash, amt: 15_000_000, .. } = events[if persist_both_monitors { 3 } else { 2 }] { + assert_eq!(payment_hash, our_payment_hash); + } else { panic!(); } + assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 }); if !persist_both_monitors { // If one of the two channels is still live, reveal the payment preimage over it. From 11c2f12baaeb4fb8fcd3965eb1aaea7b8aa21722 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 19 Apr 2022 21:46:44 +0000 Subject: [PATCH 6/9] Do additional pre-flight checks before claiming a payment As additional sanity checks, before claiming a payment, we check that we have the full amount available in `claimable_htlcs` that the payment should be for. Concretely, this prevents one somewhat-absurd edge case where a user may receive an MPP payment, wait many *blocks* before claiming it, allowing us to fail the pending HTLCs and the sender to retry some subset of the payment before we go to claim. More generally, this is just good belt-and-suspenders against any edge cases we may have missed. 
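For intuition, the accept/refuse decision this check makes boils down to
the following self-contained sketch. The types here are hypothetical,
simplified stand-ins for the real `ClaimableHTLC` and `ChannelManager`
state, and the real code logs and returns early (and additionally rejects
multi-part spontaneous payments) rather than returning an `Option`:

    // Hypothetical stand-in for the per-HTLC data the real check reads.
    struct ClaimableHtlc {
        value: u64,      // msat carried by this HTLC part
        total_msat: u64, // sender-declared total for the whole payment
    }

    // Returns Some(total) only when the parts we hold agree with each
    // other and together cover the full payment; None means we must
    // refuse to claim for now.
    fn preflight_check(sources: &[ClaimableHtlc]) -> Option<u64> {
        let mut expected_amt_msat = None;
        let mut claimable_amt_msat = 0;
        for htlc in sources {
            // Every part of one MPP payment must agree on the total.
            if expected_amt_msat.map_or(false, |exp| exp != htlc.total_msat) {
                return None;
            }
            expected_amt_msat = Some(htlc.total_msat);
            claimable_amt_msat += htlc.value;
        }
        match expected_amt_msat {
            // An empty `sources` leaves this None, so we also bail then.
            Some(expected) if claimable_amt_msat == expected => Some(claimable_amt_msat),
            _ => None,
        }
    }

    fn main() {
        // Only one 7,500-sat part of a 15,000-sat payment arrived: refuse.
        let partial = vec![ClaimableHtlc { value: 7_500_000, total_msat: 15_000_000 }];
        assert_eq!(preflight_check(&partial), None);

        // Both parts present: claim the full amount.
        let full = vec![
            ClaimableHtlc { value: 7_500_000, total_msat: 15_000_000 },
            ClaimableHtlc { value: 7_500_000, total_msat: 15_000_000 },
        ];
        assert_eq!(preflight_check(&full), Some(15_000_000));
    }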
--- lightning/src/ln/channelmanager.rs | 28 +++++++++++++ lightning/src/ln/functional_test_utils.rs | 9 +++- lightning/src/ln/functional_tests.rs | 51 +++++++++++++++++++++++ 3 files changed, 86 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 03e07671360..f92787a0a48 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3825,14 +3825,42 @@ impl ChannelMana // provide the preimage, so worrying too much about the optimal handling isn't worth // it. let mut claimable_amt_msat = 0; + let mut expected_amt_msat = None; let mut valid_mpp = true; for htlc in sources.iter() { if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) { valid_mpp = false; break; } + if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) { + log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!"); + debug_assert!(false); + valid_mpp = false; + break; + } + expected_amt_msat = Some(htlc.total_msat); + if let OnionPayload::Spontaneous(_) = &htlc.onion_payload { + // We don't currently support MPP for spontaneous payments, so just check + // that there's one payment here and move on. + if sources.len() != 1 { + log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!"); + debug_assert!(false); + valid_mpp = false; + break; + } + } + claimable_amt_msat += htlc.value; } + if sources.is_empty() || expected_amt_msat.is_none() { + log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); + return; + } + if claimable_amt_msat != expected_amt_msat.unwrap() { + log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", + expected_amt_msat.unwrap(), claimable_amt_msat); + return; + } let mut errs = Vec::new(); let mut claimed_any_htlcs = false; diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 458601542ca..6ed319ff422 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1735,13 +1735,18 @@ pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&No claim_payment(&origin, expected_route, our_payment_preimage); } -pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths_slice: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) { - let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect(); +pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) { for path in expected_paths.iter() { assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } assert!(expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap()); + + pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash); +} + +pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths_slice: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) { + let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect(); check_added_monitors!(expected_paths[0].last().unwrap(), 
expected_paths.len());
 let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index d033f85f6f7..84c811f7485 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -9870,6 +9870,57 @@ fn test_keysend_payments_to_private_node() {
 claim_payment(&nodes[0], &path, test_preimage);
 }
+#[test]
+fn test_double_partial_claim() {
+ // Test what happens if a node receives a payment, generates a PaymentReceived event, the HTLCs
+ // time out, the sender resends only some of the MPP parts, then the user processes the
+ // PaymentReceived event, ensuring they don't inadvertently claim only part of the full payment
+ // amount.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+
+ let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+ assert_eq!(route.paths.len(), 2);
+ route.paths.sort_by(|path_a, _| {
+ // Sort the path so that the path through nodes[1] comes first
+ if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+ core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+ });
+
+ send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
+ // nodes[3] has now received a PaymentReceived event...which it will take some (exorbitant)
+ // amount of time to respond to.
+
+ // Connect some blocks to time out the payment
+ connect_blocks(&nodes[3], TEST_FINAL_CLTV);
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
+
+ expect_pending_htlcs_forwardable!(nodes[3]);
+
+ pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
+
+ // nodes[0] now retries one of the two paths...
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+ // At this point nodes[3] has received one half of the payment, and the user goes to handle
+ // that PaymentReceived event they got hours ago and never handled...we should refuse to claim.
+ nodes[3].node.claim_funds(payment_preimage);
+ check_added_monitors!(nodes[3], 0);
+ assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
+}
+
 fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 // Test what happens if a node receives an MPP payment, claims it, but crashes before
 // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only

From a12d37e0633d94f1651cd24b656024b84f68e3cb Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 19 Apr 2022 22:06:50 +0000
Subject: [PATCH 7/9] Drop return value from `fail_htlc_backwards`, clarify
 docs

`ChannelManager::fail_htlc_backwards`' bool return value is quite
confusing - just because it returns false doesn't mean the payment
wasn't (already) failed. Worse, in some race cases around shutdown
where a payment was claimed before an unclean shutdown and then
retried on startup, `fail_htlc_backwards` could return true even
though a duplicate copy of the same payment was claimed, but the
claim event has not been seen by the user yet.

While it's possible to use it correctly, it's somewhat confusing to
have a return value at all, and it definitely lends itself to misuse.

Instead, we should push users towards a model where they don't care if
`fail_htlc_backwards` succeeds - either they've locally marked the
payment as failed (prior to seeing any `PaymentReceived` events) and
will fail any attempts to pay it, or they have not and the payment is
still receivable until its timeout time is reached.

We can revisit this decision based on user feedback, but will need to
very carefully document the potential failure modes here if we do.
---
 fuzz/src/chanmon_consistency.rs | 2 +-
 lightning/src/ln/chanmon_update_fail_tests.rs | 6 ++---
 lightning/src/ln/channelmanager.rs | 17 +++++++++-----
 lightning/src/ln/functional_test_utils.rs | 4 ++--
 lightning/src/ln/functional_tests.rs | 22 +++++++++----------
 lightning/src/ln/onion_route_tests.rs | 2 +-
 6 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 2fe61aac42f..540b4e4c497 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -840,7 +840,7 @@ pub fn do_test(data: &[u8], underlying_out: Out) {
 events::Event::PaymentReceived { payment_hash, ..
} => { if claim_set.insert(payment_hash.0) { if $fail { - assert!(nodes[$node].fail_htlc_backwards(&payment_hash)); + nodes[$node].fail_htlc_backwards(&payment_hash); } else { nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 52441629aa0..af4b024a8e9 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -831,7 +831,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA - assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1)); + nodes[2].node.fail_htlc_backwards(&payment_hash_1); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(nodes[2], 1); @@ -1696,7 +1696,7 @@ fn test_monitor_update_on_pending_forwards() { send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); - assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1)); + nodes[2].node.fail_htlc_backwards(&payment_hash_1); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(nodes[2], 1); @@ -2468,7 +2468,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f payment_preimage, }; if second_fails { - assert!(nodes[2].node.fail_htlc_backwards(&payment_hash)); + nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f92787a0a48..19d0b577318 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3500,9 +3500,17 @@ impl ChannelMana /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources /// along the path (including in our own channel on which we received it). - /// Returns false if no payment was found to fail backwards, true if the process of failing the - /// HTLC backwards has been started. - pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool { + /// + /// Note that in some cases around unclean shutdown, it is possible the payment may have + /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a + /// second copy of) the [`events::Event::PaymentReceived`] event. Alternatively, the payment + /// may have already been failed automatically by LDK if it was nearing its expiration time. + /// + /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling + /// [`ChannelManager::claim_funds`]), you should still monitor for + /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on + /// startup during which time claims that were in-progress at shutdown may be replayed. 
+ pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let mut channel_state = Some(self.channel_state.lock().unwrap()); @@ -3517,8 +3525,7 @@ impl ChannelMana HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }); } - true - } else { false } + } } /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6ed319ff422..e747810f1b5 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1739,7 +1739,7 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe for path in expected_paths.iter() { assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } - assert!(expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); + expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash); expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap()); pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash); @@ -1845,7 +1845,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe } // Ensure that fail_htlc_backwards is idempotent. - assert!(!expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); + expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_events().is_empty()); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(expected_paths[0].last().unwrap(), 0); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 84c811f7485..121d8482496 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3089,7 +3089,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); - assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash)); + nodes[2].node.fail_htlc_backwards(&first_payment_hash); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -3102,7 +3102,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); // Drop the last RAA from 3 -> 2 - assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash)); + nodes[2].node.fail_htlc_backwards(&second_payment_hash); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -3119,7 +3119,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[2], 1); - 
assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash)); + nodes[2].node.fail_htlc_backwards(&third_payment_hash); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -5518,10 +5518,10 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go. // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs - assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1)); - assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3)); - assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5)); - assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6)); + nodes[4].node.fail_htlc_backwards(&payment_hash_1); + nodes[4].node.fail_htlc_backwards(&payment_hash_3); + nodes[4].node.fail_htlc_backwards(&payment_hash_5); + nodes[4].node.fail_htlc_backwards(&payment_hash_6); check_added_monitors!(nodes[4], 0); expect_pending_htlcs_forwardable!(nodes[4]); check_added_monitors!(nodes[4], 1); @@ -5534,8 +5534,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false); // Fail 3rd below-dust and 7th above-dust HTLCs - assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2)); - assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4)); + nodes[5].node.fail_htlc_backwards(&payment_hash_2); + nodes[5].node.fail_htlc_backwards(&payment_hash_4); check_added_monitors!(nodes[5], 0); expect_pending_htlcs_forwardable!(nodes[5]); check_added_monitors!(nodes[5], 1); @@ -5960,7 +5960,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no // actually revoked. 
let htlc_value = if use_dust { 50000 } else { 3000000 }; let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value); - assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash)); + nodes[1].node.fail_htlc_backwards(&our_payment_hash); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); @@ -7136,7 +7136,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2); // Fail one HTLC to prune it in the will-be-latest-local commitment tx - assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2)); + nodes[1].node.fail_htlc_backwards(&payment_hash_2); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 9a07603fafe..802cd3acab4 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -1155,7 +1155,7 @@ fn test_phantom_failure_reject_payment() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat); - assert!(nodes[1].node.fail_htlc_backwards(&payment_hash)); + nodes[1].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); From 8a5670245a111b31b816c99c5b9a64c0df253e76 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 22 Apr 2022 17:58:19 +0000 Subject: [PATCH 8/9] Add internal docs for ChannelMonitor::payment_preimages --- lightning/src/chain/channelmonitor.rs | 4 ++++ lightning/src/ln/channel.rs | 6 +++--- lightning/src/ln/channelmanager.rs | 4 +++- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index fd66e585208..f0ee1c85937 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -655,6 +655,10 @@ pub(crate) struct ChannelMonitorImpl { // deserialization current_holder_commitment_number: u64, + /// The set of payment hashes from inbound payments for which we know the preimage. Payment + /// preimages that are not included in any unrevoked local commitment transaction or unrevoked + /// remote commitment transactions are automatically removed when commitment transactions are + /// revoked. payment_preimages: HashMap, // Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1d204d18a17..cde1e8a13bd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1703,11 +1703,11 @@ impl Channel { make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) } - /// Claims an HTLC while we're disconnected from a peer, dropping the ChannelMonitorUpdate + /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] /// entirely. /// - /// The ChannelMonitor for this channel MUST be updated out-of-band with the preimage provided - /// (i.e. without calling [`crate::chain::Watch::update_channel`]). + /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage + /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]). 
/// /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is /// disconnected). diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 19d0b577318..5a58958b413 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -414,7 +414,9 @@ pub(super) struct ChannelHolder<Signer: Sign> { /// guarantees are made about the existence of a channel with the short id here, nor the short /// ids in the PendingHTLCInfo! pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>, - /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user. + /// Map from payment hash to the payment data and any HTLCs which are to us and can be + /// failed/claimed by the user. + /// /// Note that while this is held in the same mutex as the channels themselves, no consistency /// guarantees are made about the channels given here actually existing anymore by the time you /// go to read them! From 531d6c8663abf4f3d673b78ca4c2db68411f979e Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 28 May 2022 00:27:14 +0000 Subject: [PATCH 9/9] Change `Event` `amt` fields to `amount_msat` for clarity --- lightning/src/ln/chanmon_update_fail_tests.rs | 20 ++++++++-------- lightning/src/ln/channelmanager.rs | 8 +++---- lightning/src/ln/functional_test_utils.rs | 12 +++++----- lightning/src/ln/functional_tests.rs | 18 +++++++------- lightning/src/util/events.rs | 24 +++++++++---------- 5 files changed, 42 insertions(+), 40 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index af4b024a8e9..5d0ef759cf7 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -191,9 +191,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let events_3 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(payment_hash_1, *payment_hash); - assert_eq!(amt, 1000000); + assert_eq!(amount_msat, 1_000_000); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -559,9 +559,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(payment_hash_2, *payment_hash); - assert_eq!(amt, 1000000); + assert_eq!(amount_msat, 1_000_000); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -676,9 +676,9 @@ fn test_monitor_update_fail_cs() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { payment_hash, ref purpose, amt } => { + Event::PaymentReceived { payment_hash, ref purpose, amount_msat } => { assert_eq!(payment_hash, our_payment_hash); - assert_eq!(amt, 1000000); + assert_eq!(amount_msat, 1_000_000); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, ..
} => { assert!(payment_preimage.is_none()); @@ -1650,9 +1650,9 @@ fn test_monitor_update_fail_claim() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(payment_hash_2, *payment_hash); - assert_eq!(1_000_000, amt); + assert_eq!(1_000_000, amount_msat); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -1664,9 +1664,9 @@ fn test_monitor_update_fail_claim() { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(payment_hash_3, *payment_hash); - assert_eq!(1_000_000, amt); + assert_eq!(1_000_000, amount_msat); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5a58958b413..40a92dffab3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3184,7 +3184,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana new_events.push(events::Event::PaymentReceived { payment_hash, purpose: purpose(), - amt: total_value, + amount_msat: total_value, }); payment_received_generated = true; } else { @@ -3225,7 +3225,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana e.insert((purpose.clone(), vec![claimable_htlc])); new_events.push(events::Event::PaymentReceived { payment_hash, - amt: amt_to_forward, + amount_msat: amt_to_forward, purpose, }); }, @@ -3908,7 +3908,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed { payment_hash, purpose: payment_purpose, - amt: claimable_amt_msat, + amount_msat: claimable_amt_msat, }); } @@ -6906,7 +6906,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> pending_events_read.push(events::Event::PaymentClaimed { payment_hash, purpose: payment_purpose, - amt: claimable_amt_msat, + amount_msat: claimable_amt_msat, }); } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e747810f1b5..e176782b658 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1252,9 +1252,9 @@ macro_rules! expect_payment_received { let events = $node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - $crate::util::events::Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + $crate::util::events::Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!($expected_payment_hash, *payment_hash); - assert_eq!($expected_recv_value, amt); + assert_eq!($expected_recv_value, amount_msat); match purpose { $crate::util::events::PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert_eq!(&$expected_payment_preimage, payment_preimage); @@ -1275,9 +1275,9 @@ macro_rules! expect_payment_claimed { let events = $node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - $crate::util::events::Event::PaymentClaimed { ref payment_hash, amt, .. } => { + $crate::util::events::Event::PaymentClaimed { ref payment_hash, amount_msat, ..
} => { assert_eq!($expected_payment_hash, *payment_hash); - assert_eq!($expected_recv_value, amt); + assert_eq!($expected_recv_value, amount_msat); }, _ => panic!("Unexpected event"), } @@ -1512,7 +1512,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_p if payment_received_expected { assert_eq!(events_2.len(), 1); match events_2[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt} => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(our_payment_hash, *payment_hash); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { @@ -1524,7 +1524,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_p assert!(our_payment_secret.is_none()); }, } - assert_eq!(amt, recv_value); + assert_eq!(amount_msat, recv_value); }, _ => panic!("Unexpected event"), } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 121d8482496..d1a68a90823 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1954,9 +1954,9 @@ fn test_channel_reserve_holding_cell_htlcs() { let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(our_payment_hash_21, *payment_hash); - assert_eq!(recv_value_21, amt); + assert_eq!(recv_value_21, amount_msat); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -1968,9 +1968,9 @@ fn test_channel_reserve_holding_cell_htlcs() { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(our_payment_hash_22, *payment_hash); - assert_eq!(recv_value_22, amt); + assert_eq!(recv_value_22, amount_msat); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -3709,9 +3709,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_2 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => { assert_eq!(payment_hash_1, *payment_hash); - assert_eq!(amt, 1000000); + assert_eq!(amount_msat, 1_000_000); match &purpose { PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); @@ -10053,7 +10053,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // never finished processing. let events = nodes[3].node.get_and_clear_pending_events(); assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 }); - if let Event::PaymentReceived { amt: 15_000_000, .. } = events[0] { } else { panic!(); } + if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); } if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); } if persist_both_monitors { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. 
} = events[2] { } else { panic!(); } @@ -10061,7 +10061,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // On restart, we should also get a duplicate PaymentClaimed event as we persisted the // ChannelManager prior to handling the original one. - if let Event::PaymentClaimed { payment_hash: our_payment_hash, amt: 15_000_000, .. } = events[if persist_both_monitors { 3 } else { 2 }] { + if let Event::PaymentClaimed { payment_hash: our_payment_hash, amount_msat: 15_000_000, .. } = + events[if persist_both_monitors { 3 } else { 2 }] + { assert_eq!(payment_hash, our_payment_hash); } else { panic!(); } diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index 5c00a05c370..9a1f6c5bc50 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -209,7 +209,7 @@ pub enum Event { /// not stop you from registering duplicate payment hashes for inbound payments. payment_hash: PaymentHash, /// The value, in thousandths of a satoshi, that this payment is for. - amt: u64, + amount_msat: u64, /// Information for claiming this received payment, based on whether the purpose of the /// payment is to pay an invoice or to send a spontaneous payment. purpose: PaymentPurpose, @@ -233,7 +233,7 @@ pub enum Event { /// registering duplicate payment hashes for inbound payments. payment_hash: PaymentHash, /// The value, in thousandths of a satoshi, that this payment is for. - amt: u64, + amount_msat: u64, /// The purpose of this claimed payment, i.e. whether the payment was for an invoice or a /// spontaneous payment. purpose: PaymentPurpose, @@ -508,7 +508,7 @@ impl Writeable for Event { // We never write out FundingGenerationReady events as, upon disconnection, peers // drop any channels which have not yet exchanged funding_signed. }, - &Event::PaymentReceived { ref payment_hash, ref amt, ref purpose } => { + &Event::PaymentReceived { ref payment_hash, ref amount_msat, ref purpose } => { 1u8.write(writer)?; let mut payment_secret = None; let payment_preimage; @@ -524,7 +524,7 @@ impl Writeable for Event { write_tlv_fields!(writer, { (0, payment_hash, required), (2, payment_secret, option), - (4, amt, required), + (4, amount_msat, required), (6, 0u64, required), // user_payment_id required for compatibility with 0.0.103 and earlier (8, payment_preimage, option), }); @@ -617,12 +617,12 @@ impl Writeable for Event { // We never write the OpenChannelRequest events as, upon disconnection, peers // drop any channels which have not yet exchanged funding_signed. 
}, - &Event::PaymentClaimed { ref payment_hash, ref amt, ref purpose } => { + &Event::PaymentClaimed { ref payment_hash, ref amount_msat, ref purpose } => { 19u8.write(writer)?; write_tlv_fields!(writer, { (0, payment_hash, required), (2, purpose, required), - (4, amt, required), + (4, amount_msat, required), }); }, // Note that, going forward, all new events must only write data inside of @@ -643,12 +643,12 @@ impl MaybeReadable for Event { let mut payment_hash = PaymentHash([0; 32]); let mut payment_preimage = None; let mut payment_secret = None; - let mut amt = 0; + let mut amount_msat = 0; let mut _user_payment_id = None::<u64>; // For compatibility with 0.0.103 and earlier read_tlv_fields!(reader, { (0, payment_hash, required), (2, payment_secret, option), - (4, amt, required), + (4, amount_msat, required), (6, _user_payment_id, option), (8, payment_preimage, option), }); @@ -662,7 +662,7 @@ impl MaybeReadable for Event { }; Ok(Some(Event::PaymentReceived { payment_hash, - amt, + amount_msat, purpose, })) }; @@ -829,17 +829,17 @@ impl MaybeReadable for Event { let f = || { let mut payment_hash = PaymentHash([0; 32]); let mut purpose = None; - let mut amt = 0; + let mut amount_msat = 0; read_tlv_fields!(reader, { (0, payment_hash, required), (2, purpose, ignorable), - (4, amt, required), + (4, amount_msat, required), }); if purpose.is_none() { return Ok(None); } Ok(Some(Event::PaymentClaimed { payment_hash, purpose: purpose.unwrap(), - amt, + amount_msat, })) }; f()
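
A note on the `payment_preimages` documentation added in patch 8/9: the retention rule it describes can be pictured with a small toy model. This is an illustrative sketch only, not LDK code; every name in it is made up, and the real `ChannelMonitor` prunes internally while processing commitment revocation rather than via a public method like this:

    use std::collections::{HashMap, HashSet};

    /// Toy stand-in for the monitor's preimage store, keyed by payment hash.
    struct ToyMonitor {
        payment_preimages: HashMap<[u8; 32], [u8; 32]>,
    }

    impl ToyMonitor {
        /// After a commitment transaction is revoked, a preimage only needs
        /// to be kept if its payment hash still appears in some unrevoked
        /// local or remote commitment transaction.
        fn prune_after_revocation(&mut self, still_claimable: &HashSet<[u8; 32]>) {
            self.payment_preimages.retain(|hash, _| still_claimable.contains(hash));
        }
    }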
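
Since patch 9/9 renames a publicly-matched field, downstream match sites need a one-token update, while the wire format is untouched: the amount keeps TLV type 4 in both the `PaymentReceived` and `PaymentClaimed` serialization above, so events written by older versions still read back correctly. A minimal sketch of a post-rename event handler, assuming the `lightning` crate at the version this series targets; `handle_event` and its bodies are illustrative, not part of the series:

    use lightning::util::events::{Event, PaymentPurpose};

    fn handle_event(event: Event) {
        match event {
            // Previously `amt: u64`; only the Rust-side field name changed.
            Event::PaymentReceived { payment_hash, amount_msat, purpose } => {
                match purpose {
                    PaymentPurpose::InvoicePayment { payment_secret, .. } => {
                        // Invoice payment: look up the preimage for
                        // `payment_hash` out-of-band and claim it.
                        let _ = (payment_hash, amount_msat, payment_secret);
                    },
                    PaymentPurpose::SpontaneousPayment(preimage) => {
                        // Keysend: the preimage arrives in the onion itself.
                        let _ = (payment_hash, amount_msat, preimage);
                    },
                }
            },
            Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
                // Generated once the claim actually completes; same rename.
                let _ = (payment_hash, amount_msat);
            },
            _ => {},
        }
    }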