Implement some rate limiting for onion messages.
In this commit, we add business logic for checking whether a peer's outbound buffer
has room for onion messages and, if so, pulling them from an implementer of a new
trait, OnionMessageProvider.

Makes sure channel messages are prioritized over OMs.

The onion_message module remains private until further rate limiting is added.
valentinewallace committed Aug 16, 2022
1 parent 356ec9b commit 0e2ee89
Showing 2 changed files with 47 additions and 0 deletions.
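
For orientation: the new trait named in the commit message is implemented by OnionMessenger in the second file below. Its shape, inferred from that impl (the real definition lives elsewhere in the crate and may carry additional docs and bounds), is roughly:

use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs;

/// Inferred sketch of the provider trait; not the verbatim LDK definition.
pub trait OnionMessageProvider {
	/// Returns up to `max_messages` onion messages queued for `peer_node_id`,
	/// removing them from the internal queue.
	fn next_onion_messages_for_peer(&self, peer_node_id: PublicKey, max_messages: usize)
		-> Vec<msgs::OnionMessage>;
}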
37 changes: 37 additions & 0 deletions lightning/src/ln/peer_handler.rs
@@ -306,6 +306,10 @@ enum InitSyncTracker{
/// forwarding gossip messages to peers altogether.
const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;

/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we pause
/// forwarding onion messages to peers altogether.
const OM_BUFFER_LIMIT_RATIO: usize = 2;

/// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
/// we have fewer than this many messages in the outbound buffer again.
/// We also use this as the target number of outbound gossip messages to keep in the write buffer,
@@ -315,6 +319,10 @@ const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
/// the peer.
const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;

/// When the outbound buffer has this many messages, we won't poll for new onion messages for this
/// peer.
const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 8;

/// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
/// the socket receive buffer before receiving the ping.
///
@@ -417,6 +425,13 @@ impl Peer {
}
true
}

/// Returns the number of onion messages we can fit in this peer's buffer.
fn onion_message_buffer_slots_available(&self) -> usize {
cmp::min(
OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(self.pending_outbound_buffer.len()),
(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(self.msgs_sent_since_pong))
}
}

/// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
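
Concretely, the helper caps OM polling by whichever limit is tighter: remaining space under the 8-message buffer cap, or the per-tick message budget. A self-contained sketch of the arithmetic (BUFFER_DRAIN_MSGS_PER_TICK is not shown in this diff; 32 is an assumed illustrative value):

use std::cmp;

const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 8;
const OM_BUFFER_LIMIT_RATIO: usize = 2;
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32; // assumed; not shown in this diff

fn slots_available(pending_outbound: usize, msgs_sent_since_pong: usize) -> usize {
	cmp::min(
		OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(pending_outbound),
		(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(msgs_sent_since_pong),
	)
}

fn main() {
	assert_eq!(slots_available(0, 0), 8);  // empty buffer, quiet peer: all slots open
	assert_eq!(slots_available(6, 0), 2);  // 6 channel msgs queued: 2 OM slots left
	assert_eq!(slots_available(0, 62), 2); // near the per-tick cap (64): the cap binds
	assert_eq!(slots_available(9, 0), 0);  // either limit hit: skip OM polling
}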
@@ -824,8 +839,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
/// ready to call [`write_buffer_space_avail`] again if a write call generated here isn't
/// sufficient!
///
/// If any bytes are written, [`process_events`] should be called afterwards.
///
/// [`send_data`]: SocketDescriptor::send_data
/// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
/// [`process_events`]: PeerManager::process_events
pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
let peers = self.peers.read().unwrap();
match peers.get(descriptor) {
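
The new doc sentence encodes a calling contract: once a write frees buffer space, process_events should run so the freed slots can be refilled (that is now where onion messages get polled). A toy sketch of that ordering, with all types hypothetical stand-ins for a real network driver and LDK's PeerManager:

enum SocketEvent { Writable }

struct ToyPeerManager { queued_bytes: usize }

impl ToyPeerManager {
	/// Flushes what it can; returns how many bytes went out.
	fn write_buffer_space_avail(&mut self) -> usize {
		std::mem::take(&mut self.queued_bytes)
	}
	/// Refills freed outbound-buffer slots (where OMs are polled in the real code).
	fn process_events(&mut self) {}
}

fn handle(pm: &mut ToyPeerManager, ev: SocketEvent) {
	match ev {
		SocketEvent::Writable => {
			let written = pm.write_buffer_space_avail();
			if written > 0 {
				pm.process_events(); // per the new doc: call after any write
			}
		}
	}
}

fn main() {
	let mut pm = ToyPeerManager { queued_bytes: 512 };
	handle(&mut pm, SocketEvent::Writable);
}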
@@ -1669,6 +1687,25 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM

for (descriptor, peer_mutex) in peers.iter() {
self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());

// Only see if we have room for onion messages after we've written all channel messages, to
// ensure the latter take priority.
loop {
let (peer_node_id, om_buffer_slots_avail) = {
let peer = peer_mutex.lock().unwrap();
if let Some(peer_node_id) = peer.their_node_id {
(peer_node_id, peer.onion_message_buffer_slots_available())
} else { break; }
};
if om_buffer_slots_avail == 0 { break; }
let onion_msgs = self.message_handler.onion_message_handler.next_onion_messages_for_peer(
peer_node_id, om_buffer_slots_avail);
if onion_msgs.len() == 0 { break; }
for msg in onion_msgs {
self.enqueue_message(&mut *get_peer_for_forwarding!(&peer_node_id), &msg);
}
self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
}
}
}
if !peers_to_disconnect.is_empty() {
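
Putting the pieces together, the loop above alternates "fill slots, flush" until the OM queue empties or a limit binds. A self-contained model of that flow, with Toy* types standing in for the real Peer and PeerManager (the real do_attempt_write_data respects socket backpressure, which this toy ignores):

use std::cmp;

const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = 8;

struct ToyPeer {
	pending_outbound_buffer: Vec<String>, // channel msgs + OMs, in send order
}

impl ToyPeer {
	fn om_slots_available(&self) -> usize {
		// Only the buffer-fill half of the real check, for brevity.
		OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(self.pending_outbound_buffer.len())
	}
	fn flush(&mut self) {
		// Stand-in for do_attempt_write_data: pretend the socket drains everything.
		self.pending_outbound_buffer.clear();
	}
}

fn process_events(peer: &mut ToyPeer, om_queue: &mut Vec<String>) {
	peer.flush(); // channel messages go out first, so they keep priority
	loop {
		let slots = peer.om_slots_available();
		let take = cmp::min(slots, om_queue.len());
		if take == 0 { break; }
		peer.pending_outbound_buffer.extend(om_queue.drain(..take));
		peer.flush();
	}
}

fn main() {
	let mut peer = ToyPeer { pending_outbound_buffer: vec!["chan_msg".to_string()] };
	let mut oms: Vec<String> = (0..20).map(|i| format!("om_{}", i)).collect();
	process_events(&mut peer, &mut oms);
	assert!(oms.is_empty()); // the toy socket never backpressures, so all OMs drain
}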
10 changes: 10 additions & 0 deletions lightning/src/onion_message/messenger.rs
@@ -292,6 +292,16 @@ impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<S
L::Target: Logger,
{
fn next_onion_messages_for_peer(&self, peer_node_id: PublicKey, max_messages: usize) -> Vec<msgs::OnionMessage> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
if max_messages >= msgs.len() {
let mut peer_pending_msgs = Vec::new();
mem::swap(msgs, &mut peer_pending_msgs);
return peer_pending_msgs
} else {
// `split_off` returns the tail of the vec, so swap afterwards to hand
// back the *first* `max_messages` in order and leave the rest queued.
let mut peer_pending_msgs = msgs.split_off(max_messages);
mem::swap(msgs, &mut peer_pending_msgs);
return peer_pending_msgs
}
}
Vec::new()
}
}
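
Vec::split_off(n) leaves the first n elements in place and returns the tail, hence the swap above to hand back the oldest max_messages in FIFO order while re-queuing the remainder. The same pattern over plain Strings, runnable standalone:

use std::collections::HashMap;
use std::mem;

fn next_msgs_for_peer(
	pending: &mut HashMap<u32, Vec<String>>, peer: u32, max_messages: usize,
) -> Vec<String> {
	if let Some(msgs) = pending.get_mut(&peer) {
		if max_messages >= msgs.len() {
			let mut peer_pending_msgs = Vec::new();
			mem::swap(msgs, &mut peer_pending_msgs);
			return peer_pending_msgs;
		} else {
			// `split_off(n)` keeps the first n in `msgs` and returns the rest;
			// swapping returns the *oldest* n and re-queues the tail.
			let mut peer_pending_msgs = msgs.split_off(max_messages);
			mem::swap(msgs, &mut peer_pending_msgs);
			return peer_pending_msgs;
		}
	}
	Vec::new()
}

fn main() {
	let mut pending = HashMap::new();
	pending.insert(7, vec!["a".to_string(), "b".into(), "c".into()]);
	assert_eq!(next_msgs_for_peer(&mut pending, 7, 2), vec!["a", "b"]);
	assert_eq!(pending[&7], vec!["c"]); // remainder stays queued, in order
	assert_eq!(next_msgs_for_peer(&mut pending, 7, 5), vec!["c"]); // full drain
	assert!(pending[&7].is_empty());
}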
