// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::script::{Script,Builder};
use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
use bitcoin::util::sighash;
use bitcoin::consensus::encode;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};
use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
use crate::util::scid_utils::scid_from_parts;
use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use bitcoin::hashes::hex::ToHex;
#[cfg(test)]
pub struct ChannelValueStat {
pub value_to_self_msat: u64,
pub channel_value_msat: u64,
pub channel_reserve_msat: u64,
pub pending_outbound_htlcs_amount_msat: u64,
pub pending_inbound_htlcs_amount_msat: u64,
pub holding_cell_outbound_amount_msat: u64,
pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
pub balance_msat: u64,
/// Total amount available for our counterparty to send to us.
pub inbound_capacity_msat: u64,
/// Total amount available for us to send to our counterparty.
pub outbound_capacity_msat: u64,
/// The maximum value we can assign to the next outbound HTLC.
pub next_outbound_htlc_limit_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
// Inbound states mirroring InboundHTLCState
RemoteAnnounced,
AwaitingRemoteRevokeToAnnounce,
// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
// the fee update anywhere, we can simply consider the fee update `Committed` immediately
// instead of setting it to AwaitingAnnouncedRemoteRevoke.
// Outbound state can only be `LocalAnnounced` or `Committed`
Outbound,
}
enum InboundHTLCRemovalReason {
FailRelay(msgs::OnionErrorPacket),
FailMalformed(([u8; 32], u16)),
Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
/// update_add_htlc message for this HTLC.
RemoteAnnounced(PendingHTLCStatus),
/// Included in a received commitment_signed message (implying we've
/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
/// state (see the example below). We have not yet included this HTLC in a
/// commitment_signed message because we are waiting on the remote's
/// aforementioned state revocation. One reason this missing remote RAA
/// (revoke_and_ack) blocks us from constructing a commitment_signed message
/// is because every time we create a new "state", i.e. every time we sign a
/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
/// sent provided the per_commitment_point for our current commitment tx.
/// The other reason we should not send a commitment_signed without their RAA
/// is because their RAA serves to ACK our previous commitment_signed.
///
/// Here's an example of how an HTLC could come to be in this state:
/// remote --> update_add_htlc(prev_htlc) --> local
/// remote --> commitment_signed(prev_htlc) --> local
/// remote <-- revoke_and_ack <-- local
/// remote <-- commitment_signed(prev_htlc) <-- local
/// [note that here, the remote does not respond with a RAA]
/// remote --> update_add_htlc(this_htlc) --> local
/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
/// Now `this_htlc` will be assigned this state. It's unable to be officially
/// accepted, i.e. included in a commitment_signed, because we're missing the
/// RAA that provides our next per_commitment_point. The per_commitment_point
/// is used to derive commitment keys, which are used to construct the
/// signatures in a commitment_signed message.
/// Implies AwaitingRemoteRevoke.
///
/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
/// We have also included this HTLC in our latest commitment_signed and are now just waiting
/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
/// channel (before it can then get forwarded and/or removed).
/// Implies AwaitingRemoteRevoke.
AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
Committed,
/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
/// we'll drop it.
/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
/// commitment transaction without it as otherwise we'll have to force-close the channel to
/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
/// anyway). That said, ChannelMonitor does this for us (see
/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
/// our own local state before then, once we're sure that the next commitment_signed and
/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
htlc_id: u64,
amount_msat: u64,
cltv_expiry: u32,
payment_hash: PaymentHash,
state: InboundHTLCState,
}
enum OutboundHTLCState {
/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
/// we will promote to Committed (note that they may not accept it until the next time we
/// revoke, but we don't really care about that:
/// * they've revoked, so worst case we can announce an old state and get our (option on)
/// money back (though we won't), and,
/// * we'll send them a revoke when they send a commitment_signed, and since only they're
/// allowed to remove it, the "can only be removed once committed on both sides" requirement
/// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
/// we'll never get out of sync).
/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
/// OutboundHTLCOutput's size just for a temporary bit
LocalAnnounced(Box<msgs::OnionPacket>),
Committed,
/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
/// the change (though they'll need to revoke before we fail the payment).
RemoteRemoved(OutboundHTLCOutcome),
/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
/// the remote side hasn't yet revoked their previous state, which we need them to do before we
/// can do any backwards failing. Implies AwaitingRemoteRevoke.
/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
/// remote revoke_and_ack on a previous state before we can do so.
AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
/// the remote side hasn't yet revoked their previous state, which we need them to do before we
/// can do any backwards failing. Implies AwaitingRemoteRevoke.
/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
/// revoke_and_ack to drop completely.
AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
enum OutboundHTLCOutcome {
/// LDK version 0.0.105+ will always fill in the preimage here.
Success(Option<PaymentPreimage>),
Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
fn from(o: Option<HTLCFailReason>) -> Self {
match o {
None => OutboundHTLCOutcome::Success(None),
Some(r) => OutboundHTLCOutcome::Failure(r)
}
}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
fn into(self) -> Option<&'a HTLCFailReason> {
match self {
OutboundHTLCOutcome::Success(_) => None,
OutboundHTLCOutcome::Failure(ref r) => Some(r)
}
}
}
struct OutboundHTLCOutput {
htlc_id: u64,
amount_msat: u64,
cltv_expiry: u32,
payment_hash: PaymentHash,
state: OutboundHTLCState,
source: HTLCSource,
}
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
// always outbound
amount_msat: u64,
cltv_expiry: u32,
payment_hash: PaymentHash,
source: HTLCSource,
onion_routing_packet: msgs::OnionPacket,
},
ClaimHTLC {
payment_preimage: PaymentPreimage,
htlc_id: u64,
},
FailHTLC {
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
}
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
/// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
/// move on to ChannelReady.
/// Note that PeerDisconnected can be set on both ChannelReady and FundingSent.
/// ChannelReady can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to ShutdownComplete, at which point most calls into this channel are disallowed.
enum ChannelState {
/// Implies we have (or are prepared to) send our open_channel/accept_channel message
OurInitSent = 1 << 0,
/// Implies we have received their open_channel/accept_channel message
TheirInitSent = 1 << 1,
/// We have sent funding_created and are awaiting a funding_signed to advance to FundingSent.
/// Note that this is nonsense for an inbound channel as we immediately generate funding_signed
/// upon receipt of funding_created, so simply skip this state.
FundingCreated = 4,
/// Set when we have received/sent funding_created and funding_signed and are thus now waiting
/// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
/// and our counterparty consider the funding transaction confirmed.
FundingSent = 8,
/// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
/// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
TheirChannelReady = 1 << 4,
/// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
/// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
OurChannelReady = 1 << 5,
ChannelReady = 64,
/// Flag which is set on ChannelReady and FundingSent indicating remote side is considered
/// "disconnected" and no updates are allowed until after we've done a channel_reestablish
/// dance.
PeerDisconnected = 1 << 7,
/// Flag which is set on ChannelReady, FundingCreated, and FundingSent indicating the user has
/// told us a ChannelMonitor update is pending async persistence somewhere and we should pause
/// sending any outbound messages until they've managed to finish.
MonitorUpdateInProgress = 1 << 8,
/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
/// messages as then we will be unable to determine which HTLCs they included in their
/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
/// later.
/// Flag is set on ChannelReady.
AwaitingRemoteRevoke = 1 << 9,
/// Flag which is set on ChannelReady or FundingSent after receiving a shutdown message from
/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
/// to respond with our own shutdown message when possible.
RemoteShutdownSent = 1 << 10,
/// Flag which is set on ChannelReady or FundingSent after sending a shutdown message. At this
/// point, we may not add any new HTLCs to the channel.
LocalShutdownSent = 1 << 11,
/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
/// to drop us, but we store this anyway.
ShutdownComplete = 4096,
}
const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
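// A minimal illustrative sketch (hypothetical helper, not part of LDK's API) of how the
// flag bits above are used: the low bits name a state, while the higher bits are flags
// tested with bitwise AND, e.g. against the shutdown mask defined above.
#[cfg(test)]
#[allow(dead_code)]
fn example_shutdown_in_progress(channel_state: u32) -> bool {
    (channel_state & BOTH_SIDES_SHUTDOWN_MASK) != 0
}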
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
/// We've announced the channel as enabled and are connected to our peer.
Enabled,
/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
DisabledStaged(u8),
/// Our channel is live again, but we haven't announced the channel as enabled yet.
EnabledStaged(u8),
/// We've announced the channel as disabled.
Disabled,
}
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
/// we sent the last `AnnouncementSignatures`.
NotSent,
/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
/// This state never appears on disk - instead we write `NotSent`.
MessageSent,
/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
/// they send back a `RevokeAndACK`.
/// This state never appears on disk - instead we write `NotSent`.
Committed,
/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
LocalOffered,
RemoteOffered,
}
/// A struct gathering stats on pending HTLCs, for either the inbound or outbound side.
struct HTLCStats {
pending_htlcs: u32,
pending_htlcs_value_msat: u64,
on_counterparty_tx_dust_exposure_msat: u64,
on_holder_tx_dust_exposure_msat: u64,
holding_cell_msat: u64,
on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
tx: CommitmentTransaction, // the transaction info
feerate_per_kw: u32, // the feerate included to build the transaction
total_fee_sat: u64, // the total fee included in the transaction
num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
local_balance_msat: u64, // local balance before fees but considering dust limits
remote_balance_msat: u64, // remote balance before fees but considering dust limits
preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
amount_msat: u64,
origin: HTLCInitiator,
}
impl HTLCCandidate {
fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
Self {
amount_msat,
origin,
}
}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
NewClaim {
monitor_update: ChannelMonitorUpdate,
htlc_value_msat: u64,
msg: Option<msgs::UpdateFulfillHTLC>,
},
DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch<'a> {
/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
/// previously placed in the holding cell (and has since been removed).
NewClaim {
/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
monitor_update: &'a ChannelMonitorUpdate,
/// The value of the HTLC which was claimed, in msat.
htlc_value_msat: u64,
},
/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
/// or has been forgotten (presumably previously claimed).
DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
pub raa: Option<msgs::RevokeAndACK>,
pub commitment_update: Option<msgs::CommitmentUpdate>,
pub order: RAACommitmentOrder,
pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
pub finalized_claimed_htlcs: Vec<HTLCSource>,
pub funding_broadcastable: Option<Transaction>,
pub channel_ready: Option<msgs::ChannelReady>,
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
pub channel_ready: Option<msgs::ChannelReady>,
pub raa: Option<msgs::RevokeAndACK>,
pub commitment_update: Option<msgs::CommitmentUpdate>,
pub order: RAACommitmentOrder,
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
pub shutdown_msg: Option<msgs::Shutdown>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, when we are the channel initiator, before sending an HTLC we check that the feerate
/// can increase by this multiple without hitting this case.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
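// A minimal sketch (hypothetical helper; the real check lives in the HTLC send path) of
// the buffer described above: the feerate we must still be able to afford before sending
// an HTLC as the channel initiator.
#[cfg(test)]
#[allow(dead_code)]
fn example_spike_buffered_feerate(feerate_per_kw: u32) -> u64 {
    feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE
}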
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might not have
/// enough balance remaining to cover the on-chain cost of this new HTLC's weight. If this
/// happens, our counterparty will reject our commitment_signed including this new HTLC, as
/// it infringes on the channel reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
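// Concretely: when computing the fee for our own update_fee, we budget the commitment
// transaction fee as if CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional non-dust HTLCs were
// present, using the commitment weight helpers defined below.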
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
/// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
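// Worked arithmetic for the value above: ~300 seconds of convergence delay divided by
// 60-second ticks gives EXPIRE_PREV_CONFIG_TICKS = 5.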
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compile-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
// inbound channel.
//
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant entity.
pub(super) struct Channel<Signer: ChannelSigner> {
config: LegacyChannelConfig,
// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
// constructed using it. The second element in the tuple corresponds to the number of ticks that
// have elapsed since the update occurred.
prev_config: Option<(ChannelConfig, usize)>,
inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
user_id: u128,
channel_id: [u8; 32],
temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
channel_state: u32,
// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
// our peer. However, we want to make sure they received it, or else rebroadcast it when we
// next connect.
// We do so here, see `AnnouncementSigsState` for more details on the state(s).
// Note that a number of our tests were written prior to the behavior here which retransmits
// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
// many tests.
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) announcement_sigs_state: AnnouncementSigsState,
#[cfg(not(any(test, feature = "_test_utils")))]
announcement_sigs_state: AnnouncementSigsState,
secp_ctx: Secp256k1<secp256k1::All>,
channel_value_satoshis: u64,
latest_monitor_update_id: u64,
holder_signer: Signer,
shutdown_scriptpubkey: Option<ShutdownScript>,
destination_script: Script,
// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
// generation start at 0 and count up...this simplifies some parts of implementation at the
// cost of others, but should really just be changed.
cur_holder_commitment_transaction_number: u64,
cur_counterparty_commitment_transaction_number: u64,
value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
pending_inbound_htlcs: Vec<InboundHTLCOutput>,
pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
/// need to ensure we resend them in the order we originally generated them. Note that because
/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
/// sufficient to simply set this to the opposite of any message we are generating as we
/// generate it. I.e., when we generate a CS, we set this to RAAFirst as, if there is a pending
/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
/// send it first.
resend_order: RAACommitmentOrder,
monitor_pending_channel_ready: bool,
monitor_pending_revoke_and_ack: bool,
monitor_pending_commitment_signed: bool,
// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
// responsible for some of the HTLCs here or not - we don't know whether the update in question
// completed or not. We currently ignore these fields entirely when force-closing a channel,
// but need to handle this somehow or we run the risk of losing HTLCs!
monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
monitor_pending_finalized_fulfills: Vec<HTLCSource>,
// pending_update_fee is filled when sending and receiving update_fee.
//
// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
// generating new commitment transactions with exactly the same criteria as inbound/outbound
// HTLCs with similar state.
pending_update_fee: Option<(u32, FeeUpdateState)>,
// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
// further `send_update_fee` calls, dropping the previous holding cell update entirely.
holding_cell_update_fee: Option<u32>,
next_holder_htlc_id: u64,
next_counterparty_htlc_id: u64,
feerate_per_kw: u32,
/// The timestamp set on our latest `channel_update` message for this channel. It is updated
/// when the channel is updated in ways which may impact the `channel_update` message or when a
/// new block is received, ensuring it's always at least moderately close to the current real
/// time.
update_time_counter: u32,
#[cfg(debug_assertions)]
/// Max to_local and to_remote outputs in a locally-generated commitment transaction
holder_max_commitment_tx_output: Mutex<(u64, u64)>,
#[cfg(debug_assertions)]
/// Max to_local and to_remote outputs in a remote-generated commitment transaction
counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
target_closing_feerate_sats_per_kw: Option<u32>,
/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
/// update, we need to delay processing it until later. We do that here by simply storing the
/// closing_signed message and handling it in `maybe_propose_closing_signed`.
pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
/// transaction. These are set once we reach `closing_negotiation_ready`.
#[cfg(test)]
pub(crate) closing_fee_limits: Option<(u64, u64)>,
#[cfg(not(test))]
closing_fee_limits: Option<(u64, u64)>,
/// Flag that ensures that `accept_inbound_channel` must be called before `funding_created`
/// is executed successfully. The reason for this flag is that when the
/// `UserConfig::manually_accept_inbound_channels` config flag is set to true, inbound channels
/// are required to be manually accepted by the node operator before the `msgs::AcceptChannel`
/// message is created and sent out. During the manual accept process, `accept_inbound_channel`
/// is called by `ChannelManager::accept_inbound_channel`.
///
/// The flag guards against the case where a counterparty node could theoretically send a
/// `msgs::FundingCreated` message before the node operator has manually accepted an inbound
/// channel request made by the counterparty node. That would execute `funding_created` before
/// `accept_inbound_channel`, and `funding_created` should therefore not execute successfully.
inbound_awaiting_accept: bool,
/// The hash of the block in which the funding transaction was included.
funding_tx_confirmed_in: Option<BlockHash>,
funding_tx_confirmation_height: u32,
short_channel_id: Option<u64>,
/// Either the height at which this channel was created or the height at which it was last
/// serialized if it was serialized by versions prior to 0.0.103.
/// We use this to close if funding is never broadcasted.
channel_creation_height: u32,
counterparty_dust_limit_satoshis: u64,
#[cfg(test)]
pub(super) holder_dust_limit_satoshis: u64,
#[cfg(not(test))]
holder_dust_limit_satoshis: u64,
#[cfg(test)]
pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
#[cfg(not(test))]
counterparty_max_htlc_value_in_flight_msat: u64,
#[cfg(test)]
pub(super) holder_max_htlc_value_in_flight_msat: u64,
#[cfg(not(test))]
holder_max_htlc_value_in_flight_msat: u64,
/// minimum channel reserve for self to maintain - set by them.
counterparty_selected_channel_reserve_satoshis: Option<u64>,
#[cfg(test)]
pub(super) holder_selected_channel_reserve_satoshis: u64,
#[cfg(not(test))]
holder_selected_channel_reserve_satoshis: u64,
counterparty_htlc_minimum_msat: u64,
holder_htlc_minimum_msat: u64,
#[cfg(test)]
pub counterparty_max_accepted_htlcs: u16,
#[cfg(not(test))]
counterparty_max_accepted_htlcs: u16,
holder_max_accepted_htlcs: u16,
minimum_depth: Option<u32>,
counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
funding_transaction: Option<Transaction>,
counterparty_cur_commitment_point: Option<PublicKey>,
counterparty_prev_commitment_point: Option<PublicKey>,
counterparty_node_id: PublicKey,
counterparty_shutdown_scriptpubkey: Option<Script>,
commitment_secrets: CounterpartyCommitmentSecrets,
channel_update_status: ChannelUpdateStatus,
/// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed does
/// not complete within a single timer tick (one minute), we should force-close the channel.
/// This prevents us from keeping unusable channels around forever if our counterparty wishes
/// to DoS us.
/// Note that this field is reset to false on deserialization to give us a chance to connect to
/// our peer and start the closing_signed negotiation fresh.
closing_signed_in_flight: bool,
/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
/// This can be used to rebroadcast the channel_announcement message later.
announcement_sigs: Option<(Signature, Signature)>,
// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
// be, by comparing the cached values to the fee of the transaction generated by
// `build_commitment_transaction`.
#[cfg(any(test, fuzzing))]
next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
#[cfg(any(test, fuzzing))]
next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
/// message until we receive a channel_reestablish.
///
/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
#[cfg(any(test, fuzzing))]
// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
// corresponding HTLC on the inbound path. If, then, the outbound path channel is
// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
// is fine, but as a sanity check, when we fail to generate the second claim we check here
// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
historical_inbound_htlc_fulfills: HashSet<u64>,
/// This channel's type, as negotiated during channel open
channel_type: ChannelTypeFeatures,
// Our counterparty can offer us SCID aliases which they will map to this channel when routing
// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
// the channel's funding UTXO.
//
// We also use this when sending our peer a channel_update that isn't to be broadcasted
// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
// associated channel mapping.
//
// We only bother storing the most recent SCID alias at any time, though our counterparty has
// to store all of them.
latest_inbound_scid_alias: Option<u64>,
// We always offer our counterparty a static SCID alias, which we recognize as for this channel
// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
// don't currently support node id aliases and eventually privacy should be provided with
// blinded paths instead of simple scid+node_id aliases.
outbound_scid_alias: u64,
// We track whether we already emitted a `ChannelPending` event.
channel_pending_event_emitted: bool,
// We track whether we already emitted a `ChannelReady` event.
channel_ready_event_emitted: bool,
/// The unique identifier used to re-derive the private key material for the channel through
/// [`SignerProvider::derive_channel_signer`].
channel_keys_id: [u8; 32],
/// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
/// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
/// completes we still need to be able to complete the persistence. Thus, we have to keep a
/// copy of the [`ChannelMonitorUpdate`] here until it is complete.
pending_monitor_updates: Vec<ChannelMonitorUpdate>,
}
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
fee: u64,
total_pending_htlcs: usize,
next_holder_htlc_id: u64,
next_counterparty_htlc_id: u64,
feerate: u32,
}
pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
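// Illustrative sketch (hypothetical helper, not LDK's API): the weight of a commitment
// transaction is the base weight above plus a fixed increment per non-dust HTLC, and the
// fee follows from the feerate-per-1000-weight.
#[cfg(test)]
#[allow(dead_code)]
fn example_commit_tx_fee_sat(feerate_per_kw: u32, num_nondust_htlcs: u64, opt_anchors: bool) -> u64 {
    let weight = commitment_tx_base_weight(opt_anchors) + num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
    feerate_per_kw as u64 * weight / 1000
}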
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
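// Illustrative sketch (hypothetical helper, not LDK's API): the bounds we enforce on a
// counterparty's dust_limit_satoshis during the handshake, using the two limits above.
#[cfg(test)]
#[allow(dead_code)]
fn example_dust_limit_acceptable(counterparty_dust_limit_satoshis: u64) -> bool {
    counterparty_dust_limit_satoshis >= MIN_CHAN_DUST_LIMIT_SATOSHIS &&
        counterparty_dust_limit_satoshis <= MAX_CHAN_DUST_LIMIT_SATOSHIS
}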
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
Ignore(String),
Warn(String),
Close(String),
}
impl fmt::Debug for ChannelError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
}
}
}
macro_rules! secp_check {
($res: expr, $err: expr) => {
match $res {
Ok(thing) => thing,
Err(_) => return Err(ChannelError::Close($err)),
}
};
}
impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
/// `channel_value_satoshis` in msat, set through
/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
///
/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
///
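/// For example, with a 1_000_000 satoshi channel and a configured 10%, this returns
/// `1_000_000 * 10 * 10 = 100_000_000` msat, i.e. 10% of the channel value expressed in msat.
///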
/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
1
} else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
100
} else {
config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
};
channel_value_satoshis * 10 * configured_percent
}
/// Returns a minimum channel reserve value the remote needs to maintain,
/// required by us according to the configured or default
/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
///
/// Guaranteed to return a value no larger than channel_value_satoshis
///
/// This is used both for outbound and inbound channels and has lower bound
/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
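///
/// For example, with `their_channel_reserve_proportional_millionths = 10_000` (i.e. 1%) on a
/// 1_000_000 satoshi channel, the proportional reserve is 10_000 satoshis, which is above the
/// `MIN_THEIR_CHAN_RESERVE_SATOSHIS` floor and below the channel value, so it is returned as-is.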
pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
}
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how to read/handle values other than the
/// default from storage. Hence, we use this function to avoid persisting the default value
/// of `holder_selected_channel_reserve_satoshis` for channels into storage.
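///
/// For example, a 50_000 satoshi channel yields `max(50_000 / 100, 1_000) = 1_000` satoshis.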
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
let (q, _) = channel_value_satoshis.overflowing_div(100);
cmp::min(channel_value_satoshis, cmp::max(q, 1000))
}
pub(crate) fn opt_anchors(&self) -> bool {
self.channel_transaction_parameters.opt_anchors.is_some()
}
fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
// The default channel type (ie the first one we try) depends on whether the channel is
// public - if it is, we just go with `only_static_remotekey` as it's the only option
// available. If it's private, we first try `scid_privacy` as it provides better privacy
// with no other changes, and fall back to `only_static_remotekey`.
let mut ret = ChannelTypeFeatures::only_static_remote_key();
if !config.channel_handshake_config.announced_channel &&
config.channel_handshake_config.negotiate_scid_privacy &&
their_features.supports_scid_privacy() {
ret.set_scid_privacy_required();
}
// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
// set it now. If they don't understand it, we'll fall back to our default of
// `only_static_remotekey`.
#[cfg(anchors)]
{ // Attributes are not allowed on if expressions on our current MSRV of 1.41.
if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
their_features.supports_anchors_zero_fee_htlc_tx() {
ret.set_anchors_zero_fee_htlc_tx_required();
}
}
ret
}
/// If we receive an error message, it may only be a rejection of the channel type we tried,
/// not of our ability to open any channel at all. Thus, on error, we should first call this
/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
if !self.is_outbound() || self.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
// We've exhausted our options
return Err(());
}
// We support opening a few different types of channels. Try removing our additional
// features one by one until we've either arrived at our default or the counterparty has
// accepted one.
//
// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
// checks whether the counterparty supports every feature, this would only happen if the
// counterparty is advertising the feature, but rejecting channels proposing the feature for
// whatever reason.
if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
self.channel_type.clear_anchors_zero_fee_htlc_tx();
assert!(self.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
self.channel_transaction_parameters.opt_anchors = None;
} else if self.channel_type.supports_scid_privacy() {
self.channel_type.clear_scid_privacy();
} else {
self.channel_type = ChannelTypeFeatures::only_static_remote_key();
}
Ok(self.get_open_channel(chain_hash))
}
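// Illustrative caller-side sketch (hypothetical; the real handling lives in the
// ChannelManager error path): on a peer error for a channel still in OurInitSent, retry
// with the downgraded channel type if one remains:
//
//     match chan.maybe_handle_error_without_close(chain_hash) {
//         Ok(open_msg) => { /* re-send `open_msg` to the peer */ },
//         Err(()) => { /* no fallback channel type left; fail the channel */ },
//     }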
// Constructors:
pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
outbound_scid_alias: u64
) -> Result<Channel<Signer>, APIError>
where ES::Target: EntropySource,
SP::Target: SignerProvider<Signer = Signer>,
F::Target: FeeEstimator,
{
let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
let pubkeys = holder_signer.pubkeys().clone();
if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
}
if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
}
let channel_value_msat = channel_value_satoshis * 1000;
if push_msat > channel_value_msat {
return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
}
if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
}
let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
// Protocol level safety check in place, although it should never happen because
// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
}
let channel_type = Self::get_initial_channel_type(&config, their_features);
debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
if value_to_self_msat < commitment_tx_fee {
return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
}
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
match signer_provider.get_shutdown_scriptpubkey() {
Ok(scriptpubkey) => Some(scriptpubkey),
Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
}
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
if !shutdown_scriptpubkey.is_compatible(&their_features) {
return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
}
}