From d3bc9e7b74f9053e5044c561d0f2f9ac4b06d15e Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Tue, 23 May 2023 01:35:05 +0200 Subject: [PATCH 001/126] Use SyncMessage in replication network behaviour --- .../src/network/replication/behaviour.rs | 55 +++++++++++++------ aquadoggo/src/network/replication/handler.rs | 11 ++-- aquadoggo/src/network/replication/mod.rs | 2 +- aquadoggo/src/network/replication/protocol.rs | 11 +--- 4 files changed, 47 insertions(+), 32 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index e6f54d039..9ed54ce43 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -4,6 +4,7 @@ use std::collections::VecDeque; use std::task::{Context, Poll}; use libp2p::core::Endpoint; +use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, @@ -11,11 +12,11 @@ use libp2p::swarm::{ use libp2p::{Multiaddr, PeerId}; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; -use crate::network::replication::protocol::Message; +use crate::replication::{Message, SyncMessage}; #[derive(Debug)] pub enum BehaviourOutEvent { - MessageReceived(PeerId, Message), + MessageReceived(PeerId, SyncMessage), Error, } @@ -33,7 +34,7 @@ impl Behaviour { } impl Behaviour { - fn send_message(&mut self, peer_id: PeerId, message: Message) { + fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::Message(message), @@ -41,7 +42,7 @@ impl Behaviour { }); } - fn handle_received_message(&mut self, peer_id: &PeerId, message: Message) { + fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { // @TODO: Handle incoming messages self.events .push_back(ToSwarm::GenerateEvent(BehaviourOutEvent::MessageReceived( @@ -90,8 +91,17 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, event: FromSwarm) { match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) + FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id, + connection_id, + endpoint, + failed_addresses, + other_established, + }) => self.send_message( + peer_id, + SyncMessage::new(0, Message::SyncRequest(0, vec![])), + ), + FromSwarm::ConnectionClosed(_) | FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) | FromSwarm::ListenFailure(_) @@ -124,7 +134,7 @@ mod tests { use libp2p::swarm::{keep_alive, Swarm}; use libp2p_swarm_test::SwarmExt; - use crate::network::replication::Message; + use crate::replication::{Message, SyncMessage}; use super::{Behaviour as ReplicationBehaviour, BehaviourOutEvent}; @@ -189,9 +199,10 @@ mod tests { assert_eq!(info2.connection_counters().num_established(), 1); // Send a message from to swarm1 local peer from swarm2 local peer. - swarm1 - .behaviour_mut() - .send_message(swarm2_peer_id, Message::Dummy(0)); + swarm1.behaviour_mut().send_message( + swarm2_peer_id, + SyncMessage::new(0, Message::SyncRequest(0, vec![])), + ); // Await a swarm event on swarm2. // @@ -220,14 +231,16 @@ mod tests { let swarm2_peer_id = *swarm2.local_peer_id(); // Send a message from to swarm1 local peer from swarm2 local peer. 
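        // (Nothing is transmitted here directly: as `send_message` above shows,
        // the behaviour only queues a `ToSwarm::NotifyHandler` event. The swarm
        // delivers it to the peer's connection handler on its next poll, and
        // the handler writes the message out over its substream.)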
-        swarm1
-            .behaviour_mut()
-            .send_message(swarm2_peer_id, Message::Dummy(0));
+        swarm1.behaviour_mut().send_message(
+            swarm2_peer_id,
+            SyncMessage::new(0, Message::SyncRequest(0, vec![])),
+        );
 
         // Send a message to the swarm1 local peer from the swarm2 local peer.
-        swarm2
-            .behaviour_mut()
-            .send_message(swarm1_peer_id, Message::Dummy(1));
+        swarm2.behaviour_mut().send_message(
+            swarm1_peer_id,
+            SyncMessage::new(0, Message::SyncRequest(0, vec![])),
+        );
 
         // Collect the next 2 behaviour events which occur in either swarm.
         for _ in 0..2 {
@@ -244,11 +257,17 @@ mod tests {
         // swarm1 should have received the message from swarm2 peer.
         let (peer_id, message) = &res1[0];
         assert_eq!(peer_id, &swarm2_peer_id);
-        assert_eq!(message, &Message::Dummy(1));
+        assert_eq!(
+            message,
+            &SyncMessage::new(0, Message::SyncRequest(0, vec![]))
+        );
 
         // swarm2 should have received the message from swarm1 peer.
         let (peer_id, message) = &res2[0];
         assert_eq!(peer_id, &swarm1_peer_id);
-        assert_eq!(message, &Message::Dummy(0));
+        assert_eq!(
+            message,
+            &SyncMessage::new(0, Message::SyncRequest(0, vec![]))
+        );
     }
 }
diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs
index b7b29631d..681ce230b 100644
--- a/aquadoggo/src/network/replication/handler.rs
+++ b/aquadoggo/src/network/replication/handler.rs
@@ -12,7 +12,8 @@ use libp2p::swarm::{
 };
 use thiserror::Error;
 
-use crate::network::replication::{Codec, CodecError, Message, Protocol};
+use crate::network::replication::{Codec, CodecError, Protocol};
+use crate::replication::SyncMessage;
 
 pub struct Handler {
     /// Upgrade configuration for the replication protocol.
@@ -29,7 +30,7 @@ pub struct Handler {
     outbound_substream_establishing: bool,
 
     /// Queue of messages that we want to send to the remote.
-    send_queue: VecDeque<Message>,
+    send_queue: VecDeque<SyncMessage>,
 
     /// Flag determining whether to maintain the connection to the peer.
     keep_alive: KeepAlive,
@@ -72,7 +73,7 @@ impl Handler {
 #[derive(Debug)]
 pub enum HandlerInEvent {
     /// Replication message to send on outbound stream.
-    Message(Message),
+    Message(SyncMessage),
 }
 
 /// The event emitted by the connection handler.
@@ -81,7 +82,7 @@ pub enum HandlerInEvent {
 #[derive(Debug)]
 pub enum HandlerOutEvent {
     /// Replication message received on the inbound stream.
-    Message(Message),
+    Message(SyncMessage),
 }
 
 #[derive(Debug, Error)]
@@ -110,7 +111,7 @@ enum OutboundSubstreamState {
     WaitingOutput(Stream),
 
     /// Waiting to send a message to the remote.
-    PendingSend(Stream, Message),
+    PendingSend(Stream, SyncMessage),
 
     /// Waiting to flush the substream so that the data arrives to the remote.
    PendingFlush(Stream),
diff --git a/aquadoggo/src/network/replication/mod.rs b/aquadoggo/src/network/replication/mod.rs
index 05593e59d..80d1c702f 100644
--- a/aquadoggo/src/network/replication/mod.rs
+++ b/aquadoggo/src/network/replication/mod.rs
@@ -6,4 +6,4 @@ mod protocol;
 
 pub use behaviour::Behaviour;
 pub use handler::Handler;
-pub use protocol::{Codec, CodecError, Message, Protocol, PROTOCOL_NAME};
+pub use protocol::{Codec, CodecError, Protocol, PROTOCOL_NAME};
diff --git a/aquadoggo/src/network/replication/protocol.rs b/aquadoggo/src/network/replication/protocol.rs
index f4192f6c2..a73edca2b 100644
--- a/aquadoggo/src/network/replication/protocol.rs
+++ b/aquadoggo/src/network/replication/protocol.rs
@@ -6,19 +6,14 @@
 use asynchronous_codec::{CborCodec, CborCodecError, Framed};
 use futures::{future, AsyncRead, AsyncWrite, Future};
 use libp2p::core::UpgradeInfo;
 use libp2p::{InboundUpgrade, OutboundUpgrade};
-use serde::{Deserialize, Serialize};
+
+use crate::replication::SyncMessage;
 
 pub const PROTOCOL_NAME: &[u8] = b"/p2p/p2panda/1.0.0";
 
 pub type CodecError = CborCodecError;
 
-pub type Codec = CborCodec<Message, Message>;
-
-// @TODO: Get this from our other replication module
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-pub enum Message {
-    Dummy(u64),
-}
+pub type Codec = CborCodec<SyncMessage, SyncMessage>;
 
 #[derive(Clone, Debug)]
 pub struct Protocol;
From 2d2c05c69144ee73276b61dbd95832ab5611a2c4 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Tue, 23 May 2023 01:37:50 +0200
Subject: [PATCH 002/126] Use target set in sync request

---
 aquadoggo/src/network/replication/behaviour.rs | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs
index 9ed54ce43..d852f8d96 100644
--- a/aquadoggo/src/network/replication/behaviour.rs
+++ b/aquadoggo/src/network/replication/behaviour.rs
@@ -12,7 +12,7 @@ use libp2p::swarm::{
 use libp2p::{Multiaddr, PeerId};
 
 use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent};
-use crate::replication::{Message, SyncMessage};
+use crate::replication::{Message, SyncMessage, TargetSet};
 
 #[derive(Debug)]
 pub enum BehaviourOutEvent {
@@ -99,7 +99,7 @@ impl NetworkBehaviour for Behaviour {
                 other_established,
             }) => self.send_message(
                 peer_id,
-                SyncMessage::new(0, Message::SyncRequest(0, vec![])),
+                SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))),
             ),
             FromSwarm::ConnectionClosed(_)
             | FromSwarm::AddressChange(_)
@@ -134,7 +134,7 @@ mod tests {
     use libp2p::swarm::{keep_alive, Swarm};
     use libp2p_swarm_test::SwarmExt;
 
-    use crate::replication::{Message, SyncMessage};
+    use crate::replication::{Message, SyncMessage, TargetSet};
 
     use super::{Behaviour as ReplicationBehaviour, BehaviourOutEvent};
 
@@ -201,7 +201,7 @@ mod tests {
         // Send a message to the swarm2 local peer from the swarm1 local peer.
         swarm1.behaviour_mut().send_message(
             swarm2_peer_id,
-            SyncMessage::new(0, Message::SyncRequest(0, vec![])),
+            SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))),
         );
 
         // Await a swarm event on swarm2.
@@ -233,13 +233,13 @@ mod tests {
         // Send a message to the swarm2 local peer from the swarm1 local peer.
         swarm1.behaviour_mut().send_message(
             swarm2_peer_id,
-            SyncMessage::new(0, Message::SyncRequest(0, vec![])),
+            SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))),
         );
 
         // Send a message to the swarm1 local peer from the swarm2 local peer.
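        // (`TargetSet::new(&vec![])` builds an empty target set: the request
        // names no schema ids at all, which is a no-op for replication but is
        // all these transport-level tests need. A real request would presumably
        // list the schema ids whose documents should be synced.)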
swarm2.behaviour_mut().send_message( swarm1_peer_id, - SyncMessage::new(0, Message::SyncRequest(0, vec![])), + SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))), ); // Collect the next 2 behaviour events which occur in either swarms. @@ -259,7 +259,7 @@ mod tests { assert_eq!(peer_id, &swarm2_peer_id); assert_eq!( message, - &SyncMessage::new(0, Message::SyncRequest(0, vec![])) + &SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))) ); // swarm2 should have received the message from swarm1 peer. @@ -267,7 +267,7 @@ mod tests { assert_eq!(peer_id, &swarm1_peer_id); assert_eq!( message, - &SyncMessage::new(0, Message::SyncRequest(0, vec![])) + &SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))) ); } } From d812c064afc0d68af8b9fc582ef557dbd69eb882 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Tue, 23 May 2023 01:39:39 +0200 Subject: [PATCH 003/126] Convert integer to Mode --- aquadoggo/src/network/replication/behaviour.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index d852f8d96..b94737a72 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -99,7 +99,7 @@ impl NetworkBehaviour for Behaviour { other_established, }) => self.send_message( peer_id, - SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))), + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), ), FromSwarm::ConnectionClosed(_) | FromSwarm::AddressChange(_) @@ -201,7 +201,7 @@ mod tests { // Send a message from to swarm1 local peer from swarm2 local peer. swarm1.behaviour_mut().send_message( swarm2_peer_id, - SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))), + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), ); // Await a swarm event on swarm2. @@ -233,13 +233,13 @@ mod tests { // Send a message from to swarm1 local peer from swarm2 local peer. swarm1.behaviour_mut().send_message( swarm2_peer_id, - SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))), + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), ); // Send a message from to swarm2 local peer from swarm1 local peer. swarm2.behaviour_mut().send_message( swarm1_peer_id, - SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))), + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), ); // Collect the next 2 behaviour events which occur in either swarms. @@ -259,7 +259,7 @@ mod tests { assert_eq!(peer_id, &swarm2_peer_id); assert_eq!( message, - &SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))) + &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) ); // swarm2 should have received the message from swarm1 peer. 
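        // (The `0.into()` in the hunks above converts the bare integer into the
        // replication `Mode` type via a `From` impl; 0 presumably selects the
        // naive log-height strategy, matching the `NaiveStrategy` exported from
        // the replication module later in this series.)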
@@ -267,7 +267,7 @@ mod tests { assert_eq!(peer_id, &swarm1_peer_id); assert_eq!( message, - &SyncMessage::new(0, Message::SyncRequest(0, TargetSet::new(&vec![]))) + &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) ); } } From 3c2d6966445339262276dc32cfac03bc248a7146 Mon Sep 17 00:00:00 2001 From: adz Date: Tue, 23 May 2023 01:45:03 +0200 Subject: [PATCH 004/126] Add replication to main behaviour struct --- aquadoggo/src/network/behaviour.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/network/behaviour.rs b/aquadoggo/src/network/behaviour.rs index 8886f905a..4673d8eab 100644 --- a/aquadoggo/src/network/behaviour.rs +++ b/aquadoggo/src/network/behaviour.rs @@ -42,9 +42,11 @@ pub struct Behaviour { /// Register with a rendezvous server and query remote peer addresses. pub rendezvous_client: Toggle, - /// Serve as a rendezvous point for remote peers to register their external addresses - /// and query the addresses of other peers. + /// Serve as a rendezvous point for remote peers to register their external addresses and query + /// the addresses of other peers. pub rendezvous_server: Toggle, + + pub replication: crate::network::replication::Behaviour, } impl Behaviour { @@ -132,6 +134,8 @@ impl Behaviour { None }; + let replication = crate::network::replication::Behaviour::new(); + Ok(Self { autonat: autonat.into(), identify: identify.into(), @@ -142,6 +146,7 @@ impl Behaviour { rendezvous_server: rendezvous_server.into(), relay_client: relay_client.into(), relay_server: relay_server.into(), + replication, }) } } From 9e522a20ea5775888ef13912a9f13426643fa507 Mon Sep 17 00:00:00 2001 From: adz Date: Tue, 23 May 2023 02:04:16 +0200 Subject: [PATCH 005/126] Add SyncManager to replication behaviour --- aquadoggo/src/network/behaviour.rs | 14 +- .../src/network/replication/behaviour.rs | 287 +++++++++++------- aquadoggo/src/network/service.rs | 9 +- aquadoggo/src/network/swarm.rs | 16 +- 4 files changed, 210 insertions(+), 116 deletions(-) diff --git a/aquadoggo/src/network/behaviour.rs b/aquadoggo/src/network/behaviour.rs index 4673d8eab..08a15f1a6 100644 --- a/aquadoggo/src/network/behaviour.rs +++ b/aquadoggo/src/network/behaviour.rs @@ -7,8 +7,13 @@ use libp2p::swarm::NetworkBehaviour; use libp2p::{autonat, connection_limits, identify, mdns, ping, relay, rendezvous, PeerId}; use log::debug; +use crate::bus::ServiceSender; +use crate::db::SqlStore; use crate::network::config::NODE_NAMESPACE; +use crate::network::replication; use crate::network::NetworkConfiguration; +use crate::replication::SyncIngest; +use crate::schema::SchemaProvider; /// Network behaviour for the aquadoggo node. #[derive(NetworkBehaviour)] @@ -46,7 +51,7 @@ pub struct Behaviour { /// the addresses of other peers. pub rendezvous_server: Toggle, - pub replication: crate::network::replication::Behaviour, + pub replication: replication::Behaviour, } impl Behaviour { @@ -55,6 +60,9 @@ impl Behaviour { pub fn new( network_config: &NetworkConfiguration, peer_id: PeerId, + store: &SqlStore, + schema_provider: &SchemaProvider, + tx: ServiceSender, key_pair: Keypair, relay_client: Option, ) -> Result { @@ -105,6 +113,7 @@ impl Behaviour { // address has been provided let rendezvous_client = if network_config.rendezvous_address.is_some() { debug!("Rendezvous client network behaviour enabled"); + // @TODO: Why does this need the whole key pair?! 
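        // (Most likely because the rendezvous client signs its registrations:
        // libp2p rendezvous registers peers under a namespace using signed peer
        // records, so the behaviour needs the private key and not just the
        // derived public `PeerId`.)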
Some(rendezvous::client::Behaviour::new(key_pair)) } else { None @@ -134,7 +143,8 @@ impl Behaviour { None }; - let replication = crate::network::replication::Behaviour::new(); + let ingest = SyncIngest::new(schema_provider.clone(), tx); + let replication = replication::Behaviour::new(store, ingest, &peer_id); Ok(Self { autonat: autonat.into(), diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index b94737a72..c32052083 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -11,8 +11,9 @@ use libp2p::swarm::{ }; use libp2p::{Multiaddr, PeerId}; +use crate::db::SqlStore; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; -use crate::replication::{Message, SyncMessage, TargetSet}; +use crate::replication::{Message, SyncIngest, SyncManager, SyncMessage, TargetSet}; #[derive(Debug)] pub enum BehaviourOutEvent { @@ -23,12 +24,14 @@ pub enum BehaviourOutEvent { #[derive(Debug)] pub struct Behaviour { events: VecDeque>, + manager: SyncManager, } impl Behaviour { - pub fn new() -> Self { + pub fn new(store: &SqlStore, ingest: SyncIngest, peer_id: &PeerId) -> Self { Self { events: VecDeque::new(), + manager: SyncManager::new(store.clone(), ingest, peer_id.clone()), } } } @@ -132,142 +135,202 @@ impl NetworkBehaviour for Behaviour { mod tests { use futures::FutureExt; use libp2p::swarm::{keep_alive, Swarm}; + use libp2p::PeerId; use libp2p_swarm_test::SwarmExt; + use tokio::sync::broadcast; - use crate::replication::{Message, SyncMessage, TargetSet}; + use crate::replication::{Message, SyncIngest, SyncMessage, TargetSet}; + use crate::test_utils::{test_runner, test_runner_with_manager, TestNode, TestNodeManager}; use super::{Behaviour as ReplicationBehaviour, BehaviourOutEvent}; #[tokio::test] async fn peers_connect() { - // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); - let mut swarm2 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); - - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. - swarm1.listen().await; - swarm2.connect(&mut swarm1).await; - - let swarm1_peer_id = *swarm1.local_peer_id(); - let swarm2_peer_id = *swarm2.local_peer_id(); - - let info1 = swarm1.network_info(); - let info2 = swarm2.network_info(); - - // Peers should be connected. - assert!(swarm2.is_connected(&swarm1_peer_id)); - assert!(swarm1.is_connected(&swarm2_peer_id)); - - // Each swarm should have exactly one connected peer. - assert_eq!(info1.num_peers(), 1); - assert_eq!(info2.num_peers(), 1); - - // Each swarm should have one established connection. 
- assert_eq!(info1.connection_counters().num_established(), 1); - assert_eq!(info2.connection_counters().num_established(), 1); + let (tx, _rx) = broadcast::channel(8); + + test_runner_with_manager(|manager: TestNodeManager| async move { + let mut node_a = manager.create().await; + let mut node_b = manager.create().await; + + let peer_id_a = PeerId::random(); + let peer_id_b = PeerId::random(); + + // Create two swarms + let mut swarm1 = Swarm::new_ephemeral(|_| { + ReplicationBehaviour::new( + &node_a.context.store, + SyncIngest::new(node_a.context.schema_provider.clone(), tx.clone()), + &peer_id_a, + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|_| { + ReplicationBehaviour::new( + &node_a.context.store, + SyncIngest::new(node_b.context.schema_provider.clone(), tx.clone()), + &peer_id_b, + ) + }); + + // Listen on swarm1 and connect from swarm2, this should establish a bi-directional + // connection. + swarm1.listen().await; + swarm2.connect(&mut swarm1).await; + + let swarm1_peer_id = *swarm1.local_peer_id(); + let swarm2_peer_id = *swarm2.local_peer_id(); + + let info1 = swarm1.network_info(); + let info2 = swarm2.network_info(); + + // Peers should be connected. + assert!(swarm2.is_connected(&swarm1_peer_id)); + assert!(swarm1.is_connected(&swarm2_peer_id)); + + // Each swarm should have exactly one connected peer. + assert_eq!(info1.num_peers(), 1); + assert_eq!(info2.num_peers(), 1); + + // Each swarm should have one established connection. + assert_eq!(info1.connection_counters().num_established(), 1); + assert_eq!(info2.connection_counters().num_established(), 1); + }); } #[tokio::test] async fn incompatible_network_behaviour() { - // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); - let mut swarm2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour); + let (tx, _rx) = broadcast::channel(8); + let peer_id = PeerId::random(); + + test_runner(|node: TestNode| async move { + // Create two swarms + let mut swarm1 = Swarm::new_ephemeral(|_| { + ReplicationBehaviour::new( + &node.context.store, + SyncIngest::new(node.context.schema_provider.clone(), tx.clone()), + &peer_id, + ) + }); - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. - swarm1.listen().await; - swarm2.connect(&mut swarm1).await; + let mut swarm2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour); - let swarm1_peer_id = *swarm1.local_peer_id(); - let swarm2_peer_id = *swarm2.local_peer_id(); + // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. + swarm1.listen().await; + swarm2.connect(&mut swarm1).await; - let info1 = swarm1.network_info(); - let info2 = swarm2.network_info(); + let swarm1_peer_id = *swarm1.local_peer_id(); + let swarm2_peer_id = *swarm2.local_peer_id(); - // Even though the network behaviours of our two peers are incompatible they still - // establish a connection. + let info1 = swarm1.network_info(); + let info2 = swarm2.network_info(); - // Peers should be connected. - assert!(swarm2.is_connected(&swarm1_peer_id)); - assert!(swarm1.is_connected(&swarm2_peer_id)); + // Even though the network behaviours of our two peers are incompatible they still + // establish a connection. - // Each swarm should have exactly one connected peer. - assert_eq!(info1.num_peers(), 1); - assert_eq!(info2.num_peers(), 1); + // Peers should be connected. 
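            // (Connections in libp2p are established and upgraded at the
            // transport layer, independently of the behaviours running on top;
            // a protocol mismatch only surfaces once one side tries to open a
            // substream, which the send below provokes.)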
+ assert!(swarm2.is_connected(&swarm1_peer_id)); + assert!(swarm1.is_connected(&swarm2_peer_id)); - // Each swarm should have one established connection. - assert_eq!(info1.connection_counters().num_established(), 1); - assert_eq!(info2.connection_counters().num_established(), 1); + // Each swarm should have exactly one connected peer. + assert_eq!(info1.num_peers(), 1); + assert_eq!(info2.num_peers(), 1); - // Send a message from to swarm1 local peer from swarm2 local peer. - swarm1.behaviour_mut().send_message( - swarm2_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ); + // Each swarm should have one established connection. + assert_eq!(info1.connection_counters().num_established(), 1); + assert_eq!(info2.connection_counters().num_established(), 1); - // Await a swarm event on swarm2. - // - // We expect a timeout panic as no event will occur. - let result = std::panic::AssertUnwindSafe(swarm2.next_swarm_event()) - .catch_unwind() - .await; + // Send a message from to swarm1 local peer from swarm2 local peer. + swarm1.behaviour_mut().send_message( + swarm2_peer_id, + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), + ); + + // Await a swarm event on swarm2. + // + // We expect a timeout panic as no event will occur. + let result = std::panic::AssertUnwindSafe(swarm2.next_swarm_event()) + .catch_unwind() + .await; - assert!(result.is_err()) + assert!(result.is_err()) + }); } #[tokio::test] async fn swarm_behaviour_events() { - // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); - let mut swarm2 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); - - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. - swarm1.listen().await; - swarm2.connect(&mut swarm1).await; - - let mut res1 = Vec::new(); - let mut res2 = Vec::new(); - - let swarm1_peer_id = *swarm1.local_peer_id(); - let swarm2_peer_id = *swarm2.local_peer_id(); - - // Send a message from to swarm1 local peer from swarm2 local peer. - swarm1.behaviour_mut().send_message( - swarm2_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ); - - // Send a message from to swarm2 local peer from swarm1 local peer. - swarm2.behaviour_mut().send_message( - swarm1_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ); - - // Collect the next 2 behaviour events which occur in either swarms. - for _ in 0..2 { - tokio::select! 
{ - BehaviourOutEvent::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, message)), - BehaviourOutEvent::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, message)), + let (tx, _rx) = broadcast::channel(8); + + test_runner_with_manager(|manager: TestNodeManager| async move { + let mut node_a = manager.create().await; + let mut node_b = manager.create().await; + + let peer_id_a = PeerId::random(); + let peer_id_b = PeerId::random(); + + // Create two swarms + let mut swarm1 = Swarm::new_ephemeral(|_| { + ReplicationBehaviour::new( + &node_a.context.store, + SyncIngest::new(node_a.context.schema_provider.clone(), tx.clone()), + &peer_id_a, + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|_| { + ReplicationBehaviour::new( + &node_a.context.store, + SyncIngest::new(node_b.context.schema_provider.clone(), tx.clone()), + &peer_id_b, + ) + }); + + // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. + swarm1.listen().await; + swarm2.connect(&mut swarm1).await; + + let mut res1 = Vec::new(); + let mut res2 = Vec::new(); + + let swarm1_peer_id = *swarm1.local_peer_id(); + let swarm2_peer_id = *swarm2.local_peer_id(); + + // Send a message from to swarm1 local peer from swarm2 local peer. + swarm1.behaviour_mut().send_message( + swarm2_peer_id, + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), + ); + + // Send a message from to swarm2 local peer from swarm1 local peer. + swarm2.behaviour_mut().send_message( + swarm1_peer_id, + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), + ); + + // Collect the next 2 behaviour events which occur in either swarms. + for _ in 0..2 { + tokio::select! { + BehaviourOutEvent::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, message)), + BehaviourOutEvent::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, message)), + } } - } - // Each swarm should have emitted exactly one event. - assert_eq!(res1.len(), 1); - assert_eq!(res2.len(), 1); - - // swarm1 should have received the message from swarm2 peer. - let (peer_id, message) = &res1[0]; - assert_eq!(peer_id, &swarm2_peer_id); - assert_eq!( - message, - &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) - ); - - // swarm2 should have received the message from swarm1 peer. - let (peer_id, message) = &res2[0]; - assert_eq!(peer_id, &swarm1_peer_id); - assert_eq!( - message, - &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) - ); + // Each swarm should have emitted exactly one event. + assert_eq!(res1.len(), 1); + assert_eq!(res2.len(), 1); + + // swarm1 should have received the message from swarm2 peer. + let (peer_id, message) = &res1[0]; + assert_eq!(peer_id, &swarm2_peer_id); + assert_eq!( + message, + &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) + ); + + // swarm2 should have received the message from swarm1 peer. 
+ let (peer_id, message) = &res2[0]; + assert_eq!(peer_id, &swarm1_peer_id); + assert_eq!( + message, + &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) + ); + }); } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index d7d8a22c0..2ad80055a 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -37,7 +37,14 @@ pub async fn network_service( let network_config = context.config.network.clone(); // Build the network swarm and retrieve the local peer ID - let (mut swarm, local_peer_id) = swarm::build_swarm(&network_config, key_pair).await?; + let (mut swarm, local_peer_id) = swarm::build_swarm( + &network_config, + &context.store, + &context.schema_provider, + tx.clone(), + key_pair, + ) + .await?; // Define the QUIC multiaddress on which the swarm will listen for connections let quic_multiaddr = diff --git a/aquadoggo/src/network/swarm.rs b/aquadoggo/src/network/swarm.rs index 2891871ce..92f3b9f0e 100644 --- a/aquadoggo/src/network/swarm.rs +++ b/aquadoggo/src/network/swarm.rs @@ -9,12 +9,18 @@ use libp2p::PeerId; use libp2p::Swarm; use log::info; +use crate::bus::ServiceSender; +use crate::db::SqlStore; use crate::network::behaviour::Behaviour; use crate::network::transport; use crate::network::NetworkConfiguration; +use crate::schema::SchemaProvider; pub async fn build_swarm( network_config: &NetworkConfiguration, + store: &SqlStore, + schema_provider: &SchemaProvider, + tx: ServiceSender, key_pair: Keypair, ) -> Result<(Swarm, PeerId)> { // Read the peer ID (public key) from the key pair @@ -28,7 +34,15 @@ pub async fn build_swarm( // Instantiate the custom network behaviour with default configuration // and the libp2p peer ID - let behaviour = Behaviour::new(network_config, peer_id, key_pair, relay_client)?; + let behaviour = Behaviour::new( + network_config, + peer_id, + store, + schema_provider, + tx, + key_pair, + relay_client, + )?; // Initialise a swarm with QUIC transports, our composed network behaviour // and the default configuration parameters From e73c3a700d6f87fa45ff061cae1a9f17e55e4be5 Mon Sep 17 00:00:00 2001 From: adz Date: Tue, 23 May 2023 02:23:09 +0200 Subject: [PATCH 006/126] Add schema provider to behaviour --- aquadoggo/src/network/behaviour.rs | 3 +- .../src/network/replication/behaviour.rs | 50 ++++++++++++------- 2 files changed, 34 insertions(+), 19 deletions(-) diff --git a/aquadoggo/src/network/behaviour.rs b/aquadoggo/src/network/behaviour.rs index 08a15f1a6..c1e25ba66 100644 --- a/aquadoggo/src/network/behaviour.rs +++ b/aquadoggo/src/network/behaviour.rs @@ -144,7 +144,8 @@ impl Behaviour { }; let ingest = SyncIngest::new(schema_provider.clone(), tx); - let replication = replication::Behaviour::new(store, ingest, &peer_id); + let replication = + replication::Behaviour::new(store, ingest, schema_provider.clone(), &peer_id); Ok(Self { autonat: autonat.into(), diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index c32052083..3eccf4437 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -14,6 +14,7 @@ use libp2p::{Multiaddr, PeerId}; use crate::db::SqlStore; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; use crate::replication::{Message, SyncIngest, SyncManager, SyncMessage, TargetSet}; +use crate::schema::SchemaProvider; #[derive(Debug)] pub enum BehaviourOutEvent { @@ -25,13 +26,20 @@ pub enum 
BehaviourOutEvent { pub struct Behaviour { events: VecDeque>, manager: SyncManager, + schema_provider: SchemaProvider, } impl Behaviour { - pub fn new(store: &SqlStore, ingest: SyncIngest, peer_id: &PeerId) -> Self { + pub fn new( + store: &SqlStore, + ingest: SyncIngest, + schema_provider: SchemaProvider, + peer_id: &PeerId, + ) -> Self { Self { events: VecDeque::new(), manager: SyncManager::new(store.clone(), ingest, peer_id.clone()), + schema_provider, } } } @@ -52,6 +60,14 @@ impl Behaviour { *peer_id, message, ))); } + + fn handle_established_connection(&mut self, remote_peer_id: &PeerId) { + // @TODO: Have an async backend + self.send_message( + *remote_peer_id, + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), + ) + } } impl NetworkBehaviour for Behaviour { @@ -94,16 +110,9 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, event: FromSwarm) { match event { - FromSwarm::ConnectionEstablished(ConnectionEstablished { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - }) => self.send_message( - peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ), + FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, .. }) => { + self.handle_established_connection(&peer_id) + } FromSwarm::ConnectionClosed(_) | FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) @@ -149,8 +158,8 @@ mod tests { let (tx, _rx) = broadcast::channel(8); test_runner_with_manager(|manager: TestNodeManager| async move { - let mut node_a = manager.create().await; - let mut node_b = manager.create().await; + let node_a = manager.create().await; + let node_b = manager.create().await; let peer_id_a = PeerId::random(); let peer_id_b = PeerId::random(); @@ -160,6 +169,7 @@ mod tests { ReplicationBehaviour::new( &node_a.context.store, SyncIngest::new(node_a.context.schema_provider.clone(), tx.clone()), + node_a.context.schema_provider.clone(), &peer_id_a, ) }); @@ -167,6 +177,7 @@ mod tests { ReplicationBehaviour::new( &node_a.context.store, SyncIngest::new(node_b.context.schema_provider.clone(), tx.clone()), + node_b.context.schema_provider.clone(), &peer_id_b, ) }); @@ -198,15 +209,16 @@ mod tests { #[tokio::test] async fn incompatible_network_behaviour() { - let (tx, _rx) = broadcast::channel(8); - let peer_id = PeerId::random(); - test_runner(|node: TestNode| async move { + let (tx, _rx) = broadcast::channel(8); + let peer_id = PeerId::random(); + // Create two swarms let mut swarm1 = Swarm::new_ephemeral(|_| { ReplicationBehaviour::new( &node.context.store, SyncIngest::new(node.context.schema_provider.clone(), tx.clone()), + node.context.schema_provider.clone(), &peer_id, ) }); @@ -260,8 +272,8 @@ mod tests { let (tx, _rx) = broadcast::channel(8); test_runner_with_manager(|manager: TestNodeManager| async move { - let mut node_a = manager.create().await; - let mut node_b = manager.create().await; + let node_a = manager.create().await; + let node_b = manager.create().await; let peer_id_a = PeerId::random(); let peer_id_b = PeerId::random(); @@ -271,6 +283,7 @@ mod tests { ReplicationBehaviour::new( &node_a.context.store, SyncIngest::new(node_a.context.schema_provider.clone(), tx.clone()), + node_a.context.schema_provider.clone(), &peer_id_a, ) }); @@ -278,6 +291,7 @@ mod tests { ReplicationBehaviour::new( &node_a.context.store, SyncIngest::new(node_b.context.schema_provider.clone(), tx.clone()), + node_b.context.schema_provider.clone(), &peer_id_b, ) }); From 
3bd31c413420573474e6c2204bd2dd4c4e7c73c6 Mon Sep 17 00:00:00 2001 From: adz Date: Tue, 23 May 2023 17:21:30 +0200 Subject: [PATCH 007/126] Move mananger again out of network behaviour, add replication service --- aquadoggo/src/network/behaviour.rs | 11 +- .../src/network/replication/behaviour.rs | 328 +++++++----------- aquadoggo/src/network/service.rs | 9 +- aquadoggo/src/network/swarm.rs | 16 +- aquadoggo/src/node.rs | 10 + aquadoggo/src/replication/mod.rs | 2 + aquadoggo/src/replication/service.rs | 16 + 7 files changed, 156 insertions(+), 236 deletions(-) create mode 100644 aquadoggo/src/replication/service.rs diff --git a/aquadoggo/src/network/behaviour.rs b/aquadoggo/src/network/behaviour.rs index c1e25ba66..fb9a9f20c 100644 --- a/aquadoggo/src/network/behaviour.rs +++ b/aquadoggo/src/network/behaviour.rs @@ -7,13 +7,9 @@ use libp2p::swarm::NetworkBehaviour; use libp2p::{autonat, connection_limits, identify, mdns, ping, relay, rendezvous, PeerId}; use log::debug; -use crate::bus::ServiceSender; -use crate::db::SqlStore; use crate::network::config::NODE_NAMESPACE; use crate::network::replication; use crate::network::NetworkConfiguration; -use crate::replication::SyncIngest; -use crate::schema::SchemaProvider; /// Network behaviour for the aquadoggo node. #[derive(NetworkBehaviour)] @@ -60,9 +56,6 @@ impl Behaviour { pub fn new( network_config: &NetworkConfiguration, peer_id: PeerId, - store: &SqlStore, - schema_provider: &SchemaProvider, - tx: ServiceSender, key_pair: Keypair, relay_client: Option, ) -> Result { @@ -143,9 +136,7 @@ impl Behaviour { None }; - let ingest = SyncIngest::new(schema_provider.clone(), tx); - let replication = - replication::Behaviour::new(store, ingest, schema_provider.clone(), &peer_id); + let replication = replication::Behaviour::new(); Ok(Self { autonat: autonat.into(), diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 3eccf4437..6bd614e17 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -11,10 +11,8 @@ use libp2p::swarm::{ }; use libp2p::{Multiaddr, PeerId}; -use crate::db::SqlStore; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; -use crate::replication::{Message, SyncIngest, SyncManager, SyncMessage, TargetSet}; -use crate::schema::SchemaProvider; +use crate::replication::SyncMessage; #[derive(Debug)] pub enum BehaviourOutEvent { @@ -25,21 +23,12 @@ pub enum BehaviourOutEvent { #[derive(Debug)] pub struct Behaviour { events: VecDeque>, - manager: SyncManager, - schema_provider: SchemaProvider, } impl Behaviour { - pub fn new( - store: &SqlStore, - ingest: SyncIngest, - schema_provider: SchemaProvider, - peer_id: &PeerId, - ) -> Self { + pub fn new() -> Self { Self { events: VecDeque::new(), - manager: SyncManager::new(store.clone(), ingest, peer_id.clone()), - schema_provider, } } } @@ -62,11 +51,7 @@ impl Behaviour { } fn handle_established_connection(&mut self, remote_peer_id: &PeerId) { - // @TODO: Have an async backend - self.send_message( - *remote_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ) + // @TODO } } @@ -144,207 +129,144 @@ impl NetworkBehaviour for Behaviour { mod tests { use futures::FutureExt; use libp2p::swarm::{keep_alive, Swarm}; - use libp2p::PeerId; use libp2p_swarm_test::SwarmExt; - use tokio::sync::broadcast; - use crate::replication::{Message, SyncIngest, SyncMessage, TargetSet}; - use 
crate::test_utils::{test_runner, test_runner_with_manager, TestNode, TestNodeManager}; + use crate::replication::{Message, SyncMessage, TargetSet}; use super::{Behaviour as ReplicationBehaviour, BehaviourOutEvent}; #[tokio::test] async fn peers_connect() { - let (tx, _rx) = broadcast::channel(8); - - test_runner_with_manager(|manager: TestNodeManager| async move { - let node_a = manager.create().await; - let node_b = manager.create().await; - - let peer_id_a = PeerId::random(); - let peer_id_b = PeerId::random(); - - // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| { - ReplicationBehaviour::new( - &node_a.context.store, - SyncIngest::new(node_a.context.schema_provider.clone(), tx.clone()), - node_a.context.schema_provider.clone(), - &peer_id_a, - ) - }); - let mut swarm2 = Swarm::new_ephemeral(|_| { - ReplicationBehaviour::new( - &node_a.context.store, - SyncIngest::new(node_b.context.schema_provider.clone(), tx.clone()), - node_b.context.schema_provider.clone(), - &peer_id_b, - ) - }); - - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional - // connection. - swarm1.listen().await; - swarm2.connect(&mut swarm1).await; - - let swarm1_peer_id = *swarm1.local_peer_id(); - let swarm2_peer_id = *swarm2.local_peer_id(); - - let info1 = swarm1.network_info(); - let info2 = swarm2.network_info(); - - // Peers should be connected. - assert!(swarm2.is_connected(&swarm1_peer_id)); - assert!(swarm1.is_connected(&swarm2_peer_id)); - - // Each swarm should have exactly one connected peer. - assert_eq!(info1.num_peers(), 1); - assert_eq!(info2.num_peers(), 1); - - // Each swarm should have one established connection. - assert_eq!(info1.connection_counters().num_established(), 1); - assert_eq!(info2.connection_counters().num_established(), 1); - }); + // Create two swarms + let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + let mut swarm2 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + + // Listen on swarm1 and connect from swarm2, this should establish a bi-directional + // connection. + swarm1.listen().await; + swarm2.connect(&mut swarm1).await; + + let swarm1_peer_id = *swarm1.local_peer_id(); + let swarm2_peer_id = *swarm2.local_peer_id(); + + let info1 = swarm1.network_info(); + let info2 = swarm2.network_info(); + + // Peers should be connected. + assert!(swarm2.is_connected(&swarm1_peer_id)); + assert!(swarm1.is_connected(&swarm2_peer_id)); + + // Each swarm should have exactly one connected peer. + assert_eq!(info1.num_peers(), 1); + assert_eq!(info2.num_peers(), 1); + + // Each swarm should have one established connection. + assert_eq!(info1.connection_counters().num_established(), 1); + assert_eq!(info2.connection_counters().num_established(), 1); } #[tokio::test] async fn incompatible_network_behaviour() { - test_runner(|node: TestNode| async move { - let (tx, _rx) = broadcast::channel(8); - let peer_id = PeerId::random(); - - // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| { - ReplicationBehaviour::new( - &node.context.store, - SyncIngest::new(node.context.schema_provider.clone(), tx.clone()), - node.context.schema_provider.clone(), - &peer_id, - ) - }); - - let mut swarm2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour); - - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. 
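            // (`new_ephemeral`, `listen`, `connect` and the `next_*_event`
            // helpers used throughout these tests are assumed to come from the
            // `SwarmExt` trait of the `libp2p-swarm-test` dev-dependency, which
            // drives both swarms without a hand-written event loop.)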
- swarm1.listen().await; - swarm2.connect(&mut swarm1).await; - - let swarm1_peer_id = *swarm1.local_peer_id(); - let swarm2_peer_id = *swarm2.local_peer_id(); - - let info1 = swarm1.network_info(); - let info2 = swarm2.network_info(); - - // Even though the network behaviours of our two peers are incompatible they still - // establish a connection. - - // Peers should be connected. - assert!(swarm2.is_connected(&swarm1_peer_id)); - assert!(swarm1.is_connected(&swarm2_peer_id)); - - // Each swarm should have exactly one connected peer. - assert_eq!(info1.num_peers(), 1); - assert_eq!(info2.num_peers(), 1); - - // Each swarm should have one established connection. - assert_eq!(info1.connection_counters().num_established(), 1); - assert_eq!(info2.connection_counters().num_established(), 1); - - // Send a message from to swarm1 local peer from swarm2 local peer. - swarm1.behaviour_mut().send_message( - swarm2_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ); - - // Await a swarm event on swarm2. - // - // We expect a timeout panic as no event will occur. - let result = std::panic::AssertUnwindSafe(swarm2.next_swarm_event()) - .catch_unwind() - .await; - - assert!(result.is_err()) - }); + // Create two swarms + let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + + let mut swarm2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour); + + // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. + swarm1.listen().await; + swarm2.connect(&mut swarm1).await; + + let swarm1_peer_id = *swarm1.local_peer_id(); + let swarm2_peer_id = *swarm2.local_peer_id(); + + let info1 = swarm1.network_info(); + let info2 = swarm2.network_info(); + + // Even though the network behaviours of our two peers are incompatible they still + // establish a connection. + + // Peers should be connected. + assert!(swarm2.is_connected(&swarm1_peer_id)); + assert!(swarm1.is_connected(&swarm2_peer_id)); + + // Each swarm should have exactly one connected peer. + assert_eq!(info1.num_peers(), 1); + assert_eq!(info2.num_peers(), 1); + + // Each swarm should have one established connection. + assert_eq!(info1.connection_counters().num_established(), 1); + assert_eq!(info2.connection_counters().num_established(), 1); + + // Send a message from to swarm1 local peer from swarm2 local peer. + swarm1.behaviour_mut().send_message( + swarm2_peer_id, + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), + ); + + // Await a swarm event on swarm2. + // + // We expect a timeout panic as no event will occur. 
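            // (`next_swarm_event` from `libp2p-swarm-test` is assumed to panic
            // on an internal timeout when no event arrives; `AssertUnwindSafe`
            // plus `FutureExt::catch_unwind` turns that panic into an `Err` the
            // test can assert on.)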
+ let result = std::panic::AssertUnwindSafe(swarm2.next_swarm_event()) + .catch_unwind() + .await; + + assert!(result.is_err()) } #[tokio::test] async fn swarm_behaviour_events() { - let (tx, _rx) = broadcast::channel(8); - - test_runner_with_manager(|manager: TestNodeManager| async move { - let node_a = manager.create().await; - let node_b = manager.create().await; - - let peer_id_a = PeerId::random(); - let peer_id_b = PeerId::random(); - - // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| { - ReplicationBehaviour::new( - &node_a.context.store, - SyncIngest::new(node_a.context.schema_provider.clone(), tx.clone()), - node_a.context.schema_provider.clone(), - &peer_id_a, - ) - }); - let mut swarm2 = Swarm::new_ephemeral(|_| { - ReplicationBehaviour::new( - &node_a.context.store, - SyncIngest::new(node_b.context.schema_provider.clone(), tx.clone()), - node_b.context.schema_provider.clone(), - &peer_id_b, - ) - }); - - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. - swarm1.listen().await; - swarm2.connect(&mut swarm1).await; - - let mut res1 = Vec::new(); - let mut res2 = Vec::new(); - - let swarm1_peer_id = *swarm1.local_peer_id(); - let swarm2_peer_id = *swarm2.local_peer_id(); - - // Send a message from to swarm1 local peer from swarm2 local peer. - swarm1.behaviour_mut().send_message( - swarm2_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ); - - // Send a message from to swarm2 local peer from swarm1 local peer. - swarm2.behaviour_mut().send_message( - swarm1_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), - ); - - // Collect the next 2 behaviour events which occur in either swarms. - for _ in 0..2 { - tokio::select! { - BehaviourOutEvent::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, message)), - BehaviourOutEvent::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, message)), - } + // Create two swarms + let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + let mut swarm2 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + + // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. + swarm1.listen().await; + swarm2.connect(&mut swarm1).await; + + let mut res1 = Vec::new(); + let mut res2 = Vec::new(); + + let swarm1_peer_id = *swarm1.local_peer_id(); + let swarm2_peer_id = *swarm2.local_peer_id(); + + // Send a message from to swarm1 local peer from swarm2 local peer. + swarm1.behaviour_mut().send_message( + swarm2_peer_id, + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), + ); + + // Send a message from to swarm2 local peer from swarm1 local peer. + swarm2.behaviour_mut().send_message( + swarm1_peer_id, + SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), + ); + + // Collect the next 2 behaviour events which occur in either swarms. + for _ in 0..2 { + tokio::select! { + BehaviourOutEvent::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, message)), + BehaviourOutEvent::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, message)), } + } - // Each swarm should have emitted exactly one event. - assert_eq!(res1.len(), 1); - assert_eq!(res2.len(), 1); - - // swarm1 should have received the message from swarm2 peer. 
- let (peer_id, message) = &res1[0]; - assert_eq!(peer_id, &swarm2_peer_id); - assert_eq!( - message, - &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) - ); - - // swarm2 should have received the message from swarm1 peer. - let (peer_id, message) = &res2[0]; - assert_eq!(peer_id, &swarm1_peer_id); - assert_eq!( - message, - &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) - ); - }); + // Each swarm should have emitted exactly one event. + assert_eq!(res1.len(), 1); + assert_eq!(res2.len(), 1); + + // swarm1 should have received the message from swarm2 peer. + let (peer_id, message) = &res1[0]; + assert_eq!(peer_id, &swarm2_peer_id); + assert_eq!( + message, + &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) + ); + + // swarm2 should have received the message from swarm1 peer. + let (peer_id, message) = &res2[0]; + assert_eq!(peer_id, &swarm1_peer_id); + assert_eq!( + message, + &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))) + ); } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 2ad80055a..d7d8a22c0 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -37,14 +37,7 @@ pub async fn network_service( let network_config = context.config.network.clone(); // Build the network swarm and retrieve the local peer ID - let (mut swarm, local_peer_id) = swarm::build_swarm( - &network_config, - &context.store, - &context.schema_provider, - tx.clone(), - key_pair, - ) - .await?; + let (mut swarm, local_peer_id) = swarm::build_swarm(&network_config, key_pair).await?; // Define the QUIC multiaddress on which the swarm will listen for connections let quic_multiaddr = diff --git a/aquadoggo/src/network/swarm.rs b/aquadoggo/src/network/swarm.rs index 92f3b9f0e..2891871ce 100644 --- a/aquadoggo/src/network/swarm.rs +++ b/aquadoggo/src/network/swarm.rs @@ -9,18 +9,12 @@ use libp2p::PeerId; use libp2p::Swarm; use log::info; -use crate::bus::ServiceSender; -use crate::db::SqlStore; use crate::network::behaviour::Behaviour; use crate::network::transport; use crate::network::NetworkConfiguration; -use crate::schema::SchemaProvider; pub async fn build_swarm( network_config: &NetworkConfiguration, - store: &SqlStore, - schema_provider: &SchemaProvider, - tx: ServiceSender, key_pair: Keypair, ) -> Result<(Swarm, PeerId)> { // Read the peer ID (public key) from the key pair @@ -34,15 +28,7 @@ pub async fn build_swarm( // Instantiate the custom network behaviour with default configuration // and the libp2p peer ID - let behaviour = Behaviour::new( - network_config, - peer_id, - store, - schema_provider, - tx, - key_pair, - relay_client, - )?; + let behaviour = Behaviour::new(network_config, peer_id, key_pair, relay_client)?; // Initialise a swarm with QUIC transports, our composed network behaviour // and the default configuration parameters diff --git a/aquadoggo/src/node.rs b/aquadoggo/src/node.rs index 8573b5142..0da6886fc 100644 --- a/aquadoggo/src/node.rs +++ b/aquadoggo/src/node.rs @@ -11,6 +11,7 @@ use crate::http::http_service; use crate::manager::ServiceManager; use crate::materializer::materializer_service; use crate::network::network_service; +use crate::replication::replication_service; use crate::schema::SchemaProvider; /// Capacity of the internal broadcast channel used to communicate between services. 
@@ -81,6 +82,15 @@ impl Node { panic!("Failed starting network service"); } + // Start replication service syncing data with other nodes + if manager + .add("replication", replication_service) + .await + .is_err() + { + panic!("Failed starting replication service"); + } + Self { pool, manager } } diff --git a/aquadoggo/src/replication/mod.rs b/aquadoggo/src/replication/mod.rs index bb943f877..e3584fedc 100644 --- a/aquadoggo/src/replication/mod.rs +++ b/aquadoggo/src/replication/mod.rs @@ -5,6 +5,7 @@ mod ingest; mod manager; mod message; mod mode; +mod service; mod session; mod strategies; mod target_set; @@ -14,6 +15,7 @@ pub use ingest::SyncIngest; pub use manager::SyncManager; pub use message::{LiveMode, LogHeight, Message, SyncMessage}; pub use mode::Mode; +pub use service::replication_service; pub use session::{Session, SessionId, SessionState}; pub use strategies::{NaiveStrategy, SetReconciliationStrategy, StrategyResult}; pub use target_set::TargetSet; diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs new file mode 100644 index 000000000..786b579ab --- /dev/null +++ b/aquadoggo/src/replication/service.rs @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use anyhow::Result; + +use crate::bus::ServiceSender; +use crate::context::Context; +use crate::manager::{ServiceReadySender, Shutdown}; + +pub async fn replication_service( + context: Context, + signal: Shutdown, + tx: ServiceSender, + tx_ready: ServiceReadySender, +) -> Result<()> { + Ok(()) +} From abdaa742f613144a46409955c42f1f7e7c440788 Mon Sep 17 00:00:00 2001 From: adz Date: Tue, 23 May 2023 18:11:39 +0200 Subject: [PATCH 008/126] Introduce event loop to handle swarm and channel events --- Cargo.lock | 2 + aquadoggo/Cargo.toml | 1 + .../src/network/replication/behaviour.rs | 34 +- aquadoggo/src/network/replication/mod.rs | 2 +- aquadoggo/src/network/service.rs | 473 ++++++++++-------- 5 files changed, 280 insertions(+), 232 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e54f203b3..ec634c091 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -201,6 +201,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", + "tokio-stream", "tower", "tower-http", "tower-service", @@ -4873,6 +4874,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util", ] [[package]] diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index 1f51d48eb..0745042cd 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -70,6 +70,7 @@ tokio = { version = "1.25.0", features = [ "sync", "time", ] } +tokio-stream = { version = "0.1.14", features = ["sync"] } tower-http = { version = "0.3.4", default-features = false, features = [ "cors", ] } diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 6bd614e17..4762d848d 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -4,7 +4,6 @@ use std::collections::VecDeque; use std::task::{Context, Poll}; use libp2p::core::Endpoint; -use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, @@ -15,14 +14,13 @@ use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEv use crate::replication::SyncMessage; #[derive(Debug)] -pub enum BehaviourOutEvent { +pub enum Event { MessageReceived(PeerId, SyncMessage), - Error, } 
 #[derive(Debug)]
 pub struct Behaviour {
-    events: VecDeque<ToSwarm<BehaviourOutEvent, HandlerInEvent>>,
+    events: VecDeque<ToSwarm<Event, HandlerInEvent>>,
 }
 
 impl Behaviour {
@@ -34,7 +32,7 @@ impl Behaviour {
 }
 
 impl Behaviour {
-    fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) {
+    pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) {
         self.events.push_back(ToSwarm::NotifyHandler {
             peer_id,
             event: HandlerInEvent::Message(message),
@@ -43,22 +41,17 @@ impl Behaviour {
     }
 
     fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) {
-        // @TODO: Handle incoming messages
         self.events
-            .push_back(ToSwarm::GenerateEvent(BehaviourOutEvent::MessageReceived(
+            .push_back(ToSwarm::GenerateEvent(Event::MessageReceived(
                 *peer_id, message,
             )));
     }
-
-    fn handle_established_connection(&mut self, remote_peer_id: &PeerId) {
-        // @TODO
-    }
 }
 
 impl NetworkBehaviour for Behaviour {
     type ConnectionHandler = Handler;
-    type OutEvent = BehaviourOutEvent;
+    type OutEvent = Event;
 
@@ -95,10 +88,8 @@ impl NetworkBehaviour for Behaviour {
     fn on_swarm_event(&mut self, event: FromSwarm<Self::ConnectionHandler>) {
         match event {
-            FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, .. }) => {
-                self.handle_established_connection(&peer_id)
-            }
-            FromSwarm::ConnectionClosed(_)
+            FromSwarm::ConnectionEstablished(_)
+            | FromSwarm::ConnectionClosed(_)
             | FromSwarm::AddressChange(_)
             | FromSwarm::DialFailure(_)
             | FromSwarm::ListenFailure(_)
@@ -133,7 +124,7 @@ mod tests {
 
     use crate::replication::{Message, SyncMessage, TargetSet};
 
-    use super::{Behaviour as ReplicationBehaviour, BehaviourOutEvent};
+    use super::{Behaviour as ReplicationBehaviour, Event};
 
     #[tokio::test]
     async fn peers_connect() {
@@ -160,7 +151,6 @@ mod tests {
     async fn incompatible_network_behaviour() {
         // Create two swarms
         let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new());
-
         let mut swarm2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour);
 
         // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection.
@@ -228,14 +218,14 @@ mod tests {
         // Send a message to the swarm1 local peer from the swarm2 local peer.
         swarm2.behaviour_mut().send_message(
             swarm1_peer_id,
-            SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))),
+            SyncMessage::new(1, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))),
         );
 
         // Collect the next 2 behaviour events which occur in either swarm.
         for _ in 0..2 {
             tokio::select! {
-                BehaviourOutEvent::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, message)),
-                BehaviourOutEvent::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, message)),
+                Event::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, message)),
+                Event::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, message)),
             }
         }
 
@@ -248,7 +238,7 @@ mod tests {
         // swarm1 should have received the message from swarm2 peer.
         let (peer_id, message) = &res1[0];
         assert_eq!(peer_id, &swarm2_peer_id);
         assert_eq!(
             message,
-            &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![])))
+            &SyncMessage::new(1, Message::SyncRequest(0.into(), TargetSet::new(&vec![])))
         );
 
         // swarm2 should have received the message from swarm1 peer.
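        // (The session ids now differ per direction: swarm1 sends session 0 and
        // swarm2 sends session 1, so each assertion can pin down exactly which
        // message arrived at which peer.)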
diff --git a/aquadoggo/src/network/replication/mod.rs b/aquadoggo/src/network/replication/mod.rs index 80d1c702f..1962e64d4 100644 --- a/aquadoggo/src/network/replication/mod.rs +++ b/aquadoggo/src/network/replication/mod.rs @@ -4,6 +4,6 @@ mod behaviour; mod handler; mod protocol; -pub use behaviour::Behaviour; +pub use behaviour::{Behaviour, Event}; pub use handler::Handler; pub use protocol::{Codec, CodecError, Protocol, PROTOCOL_NAME}; diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index d7d8a22c0..847d305eb 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -1,18 +1,21 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use anyhow::Result; -use futures::StreamExt; +// use futures::StreamExt; use libp2p::multiaddr::Protocol; use libp2p::ping::Event; use libp2p::swarm::{AddressScore, SwarmEvent}; -use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr}; +use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr, Swarm}; use log::{debug, info, trace, warn}; +use tokio_stream::wrappers::BroadcastStream; +use tokio_stream::StreamExt; -use crate::bus::ServiceSender; +use crate::bus::{ServiceMessage, ServiceSender}; use crate::context::Context; use crate::manager::{ServiceReadySender, Shutdown}; -use crate::network::behaviour::BehaviourEvent; +use crate::network::behaviour::{Behaviour, BehaviourEvent}; use crate::network::config::NODE_NAMESPACE; +use crate::network::replication; use crate::network::swarm; use crate::network::NetworkConfiguration; @@ -26,9 +29,6 @@ pub async fn network_service( tx: ServiceSender, tx_ready: ServiceReadySender, ) -> Result<()> { - // Subscribe to communication bus - let mut _rx = tx.subscribe(); - // Load the network key pair and peer ID let key_pair = NetworkConfiguration::load_or_generate_key_pair(context.config.base_path.clone())?; @@ -79,177 +79,273 @@ pub async fn network_service( swarm.dial(addr)?; } - // Create a cookie holder for the identify service - let mut cookie = None; + // Spawn a task to run swarm + let event_loop = EventLoop::new(swarm, tx, external_circuit_addr, network_config); + let handle = tokio::spawn(event_loop.run()); + + info!("Network service is ready"); + + if tx_ready.send(()).is_err() { + warn!("No subscriber informed about network service being ready"); + }; + + // Wait until we received the application shutdown signal or handle closed + tokio::select! { + _ = handle => (), + _ = shutdown => (), + } - // Spawn a task to handle swarm events - let handle = tokio::spawn(async move { + Ok(()) +} + +struct EventLoop { + swarm: Swarm, + tx: ServiceSender, + rx: BroadcastStream, + external_circuit_addr: Option, + network_config: NetworkConfiguration, +} + +impl EventLoop { + pub fn new( + swarm: Swarm, + tx: ServiceSender, + external_circuit_addr: Option, + network_config: NetworkConfiguration, + ) -> Self { + Self { + swarm, + rx: BroadcastStream::new(tx.subscribe()), + tx, + external_circuit_addr, + network_config, + } + } + + pub async fn run(mut self) { loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(BehaviourEvent::Mdns(event)) => match event { - mdns::Event::Discovered(list) => { - for (peer, multiaddr) in list { - debug!("mDNS discovered a new peer: {peer}"); - - if let Err(err) = swarm.dial(multiaddr) { - warn!("Failed to dial: {}", err); - } + tokio::select! 
{ + event = self.swarm.next() => { + self.handle_swarm_event(event.expect("Swarm stream to be infinite.")).await + } + event = self.rx.next() => match event { + Some(Ok(message)) => self.handle_incoming_message(message).await, + Some(Err(err)) => { + // @TODO + } + // Command channel closed, thus shutting down the network event loop. + None => return, + }, + } + } + } + + async fn handle_incoming_message(&mut self, message: ServiceMessage) {} + + async fn handle_swarm_event( + &mut self, + event: SwarmEvent, + ) { + match event { + SwarmEvent::Behaviour(BehaviourEvent::Mdns(event)) => match event { + mdns::Event::Discovered(list) => { + for (peer, multiaddr) in list { + debug!("mDNS discovered a new peer: {peer}"); + + if let Err(err) = self.swarm.dial(multiaddr) { + warn!("Failed to dial: {}", err); } } - mdns::Event::Expired(list) => { - for (peer, _multiaddr) in list { - trace!("mDNS peer has expired: {peer}"); + } + mdns::Event::Expired(list) => { + for (peer, _multiaddr) in list { + trace!("mDNS peer has expired: {peer}"); + } + } + }, + SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { peer, result: _ })) => { + debug!("Ping from: {peer}") + } + SwarmEvent::ConnectionClosed { + peer_id, + endpoint, + num_established, + cause, + } => { + info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}") + } + SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + .. + } => { + info!("ConnectionEstablished: {peer_id} {endpoint:?} {num_established}"); + + // Match on a connection with the rendezvous server + if let Some(rendezvous_peer_id) = self.network_config.rendezvous_peer_id { + if peer_id == rendezvous_peer_id { + if let Some(rendezvous_client) = + self.swarm.behaviour_mut().rendezvous_client.as_mut() + { + trace!("Connected to rendezvous point, discovering nodes in '{NODE_NAMESPACE}' namespace ..."); + + rendezvous_client.discover( + Some(rendezvous::Namespace::from_static(NODE_NAMESPACE)), + None, + None, + rendezvous_peer_id, + ); } } - }, - SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { peer, result: _ })) => { - debug!("Ping from: {peer}") } - SwarmEvent::ConnectionClosed { - peer_id, - endpoint, - num_established, - cause, + } + SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), + SwarmEvent::ExpiredListenAddr { + listener_id, + address, + } => trace!("ExpiredListenAddr: {listener_id:?} {address}"), + + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => debug!("IncomingConnection: {local_addr} {send_back_addr}"), + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => warn!("IncomingConnectionError: {local_addr} {send_back_addr} {error:?}"), + SwarmEvent::ListenerClosed { + listener_id, + addresses, + reason, + } => trace!("ListenerClosed: {listener_id:?} {addresses:?} {reason:?}"), + SwarmEvent::ListenerError { listener_id, error } => { + warn!("ListenerError: {listener_id:?} {error:?}") + } + SwarmEvent::NewListenAddr { + address, + listener_id: _, + } => { + info!("Listening on {address}"); + } + SwarmEvent::OutgoingConnectionError { peer_id, error } => { + warn!("OutgoingConnectionError: {peer_id:?} {error:?}") + } + SwarmEvent::Behaviour(BehaviourEvent::RendezvousClient(event)) => match event { + rendezvous::client::Event::Registered { + namespace, + ttl, + rendezvous_node, } => { - info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}") + trace!("Registered for '{namespace}' namespace at rendezvous point {rendezvous_node} for the next {ttl} 
seconds") } - SwarmEvent::ConnectionEstablished { - peer_id, - endpoint, - num_established, - .. - } => { - info!("ConnectionEstablished: {peer_id} {endpoint:?} {num_established}"); + rendezvous::client::Event::Discovered { registrations, .. } => { + trace!("Rendezvous point responded with peer registration data"); - // Match on a connection with the rendezvous server - if let Some(rendezvous_peer_id) = network_config.rendezvous_peer_id { - if peer_id == rendezvous_peer_id { - if let Some(rendezvous_client) = - swarm.behaviour_mut().rendezvous_client.as_mut() - { - trace!("Connected to rendezvous point, discovering nodes in '{NODE_NAMESPACE}' namespace ..."); + for registration in registrations { + for address in registration.record.addresses() { + let peer_id = registration.record.peer_id(); + let local_peer_id = *self.swarm.local_peer_id(); - rendezvous_client.discover( - Some(rendezvous::Namespace::from_static(NODE_NAMESPACE)), - None, - None, - rendezvous_peer_id, - ); + // Only dial remote peers discovered via rendezvous server + if peer_id != local_peer_id { + debug!("Discovered peer {peer_id} at {address}"); + + let p2p_suffix = Protocol::P2p(*peer_id.as_ref()); + let address_with_p2p = if !address + .ends_with(&Multiaddr::empty().with(p2p_suffix.clone())) + { + address.clone().with(p2p_suffix) + } else { + address.clone() + }; + + debug!("Preparing to dial peer {peer_id} at {address}"); + + if let Err(err) = self.swarm.dial(address_with_p2p) { + warn!("Failed to dial: {}", err); + } } } } } - SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), - SwarmEvent::ExpiredListenAddr { - listener_id, - address, - } => trace!("ExpiredListenAddr: {listener_id:?} {address}"), - - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - } => debug!("IncomingConnection: {local_addr} {send_back_addr}"), - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - } => warn!("IncomingConnectionError: {local_addr} {send_back_addr} {error:?}"), - SwarmEvent::ListenerClosed { - listener_id, - addresses, - reason, - } => trace!("ListenerClosed: {listener_id:?} {addresses:?} {reason:?}"), - SwarmEvent::ListenerError { listener_id, error } => { - warn!("ListenerError: {listener_id:?} {error:?}") + rendezvous::client::Event::RegisterFailed(error) => { + warn!("Failed to register with rendezvous point: {error}"); } - SwarmEvent::NewListenAddr { - address, - listener_id: _, - } => { - info!("Listening on {address}"); + other => trace!("Unhandled rendezvous client event: {other:?}"), + }, + SwarmEvent::Behaviour(BehaviourEvent::RendezvousServer(event)) => match event { + rendezvous::server::Event::PeerRegistered { peer, registration } => { + trace!( + "Peer {peer} registered for namespace '{}'", + registration.namespace + ); } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - warn!("OutgoingConnectionError: {peer_id:?} {error:?}") + rendezvous::server::Event::DiscoverServed { + enquirer, + registrations, + } => { + trace!( + "Served peer {enquirer} with {} registrations", + registrations.len() + ); } - SwarmEvent::Behaviour(BehaviourEvent::RendezvousClient(event)) => match event { - rendezvous::client::Event::Registered { - namespace, - ttl, - rendezvous_node, - } => { - trace!("Registered for '{namespace}' namespace at rendezvous point {rendezvous_node} for the next {ttl} seconds") - } - rendezvous::client::Event::Discovered { - registrations, - cookie: new_cookie, - .. 
- } => { - trace!("Rendezvous point responded with peer registration data"); - - cookie.replace(new_cookie); - - for registration in registrations { - for address in registration.record.addresses() { - let peer_id = registration.record.peer_id(); - - // Only dial remote peers discovered via rendezvous server - if peer_id != local_peer_id { - debug!("Discovered peer {peer_id} at {address}"); - - let p2p_suffix = Protocol::P2p(*peer_id.as_ref()); - let address_with_p2p = if !address - .ends_with(&Multiaddr::empty().with(p2p_suffix.clone())) - { - address.clone().with(p2p_suffix) - } else { - address.clone() - }; - - debug!("Preparing to dial peer {peer_id} at {address}"); - - if let Err(err) = swarm.dial(address_with_p2p) { - warn!("Failed to dial: {}", err); - } - } + other => trace!("Unhandled rendezvous server event: {other:?}"), + }, + SwarmEvent::Behaviour(BehaviourEvent::Identify(event)) => { + match event { + identify::Event::Received { peer_id, .. } => { + trace!("Received identify information from peer {peer_id}"); + + // Only attempt registration if the local node is running as a rendezvous client + if let Some(rendezvous_peer_id) = self.network_config.rendezvous_peer_id { + // Register with the rendezvous server. + + // We call `as_mut()` on the rendezvous client network behaviour in + // order to get a mutable reference out of the `Toggle` + if let Some(rendezvous_client) = + self.swarm.behaviour_mut().rendezvous_client.as_mut() + { + rendezvous_client.register( + rendezvous::Namespace::from_static(NODE_NAMESPACE), + rendezvous_peer_id, + None, + ); } } } - rendezvous::client::Event::RegisterFailed(error) => { - warn!("Failed to register with rendezvous point: {error}"); - } - other => trace!("Unhandled rendezvous client event: {other:?}"), - }, - SwarmEvent::Behaviour(BehaviourEvent::RendezvousServer(event)) => match event { - rendezvous::server::Event::PeerRegistered { peer, registration } => { + identify::Event::Sent { peer_id } | identify::Event::Pushed { peer_id } => { trace!( - "Peer {peer} registered for namespace '{}'", - registration.namespace - ); + "Sent identification information of the local node to peer {peer_id}" + ) } - rendezvous::server::Event::DiscoverServed { - enquirer, - registrations, - } => { - trace!( - "Served peer {enquirer} with {} registrations", - registrations.len() - ); + identify::Event::Error { peer_id, error } => { + warn!("Failed to identify the remote peer {peer_id}: {error}") } - other => trace!("Unhandled rendezvous server event: {other:?}"), - }, - SwarmEvent::Behaviour(BehaviourEvent::Identify(event)) => { - match event { - identify::Event::Received { peer_id, .. } => { - trace!("Received identify information from peer {peer_id}"); + } + } + SwarmEvent::Behaviour(BehaviourEvent::RelayServer(event)) => { + debug!("Unhandled relay server event: {event:?}") + } + SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { + debug!("Unhandled relay client event: {event:?}") + } + SwarmEvent::Behaviour(BehaviourEvent::Autonat(event)) => { + match event { + autonat::Event::StatusChanged { old, new } => { + trace!("NAT status changed from {:?} to {:?}", old, new); - // Only attempt registration if the local node is running as a rendezvous client - if let Some(rendezvous_peer_id) = network_config.rendezvous_peer_id { - // Register with the rendezvous server. 
+ if let Some(addr) = self.external_circuit_addr.clone() { + trace!("Adding external relayed listen address: {}", addr); + self.swarm + .add_external_address(addr, AddressScore::Finite(1)); - // We call `as_mut()` on the rendezvous client network behaviour in - // order to get a mutable reference out of the `Toggle` + if let Some(rendezvous_peer_id) = self.network_config.rendezvous_peer_id + { + // Invoke registration of relayed client address with the rendezvous server if let Some(rendezvous_client) = - swarm.behaviour_mut().rendezvous_client.as_mut() + self.swarm.behaviour_mut().rendezvous_client.as_mut() { rendezvous_client.register( rendezvous::Namespace::from_static(NODE_NAMESPACE), @@ -259,68 +355,27 @@ pub async fn network_service( } } } - identify::Event::Sent { peer_id } | identify::Event::Pushed { peer_id } => { - trace!( - "Sent identification information of the local node to peer {peer_id}" - ) - } - identify::Event::Error { peer_id, error } => { - warn!("Failed to identify the remote peer {peer_id}: {error}") - } - } - } - SwarmEvent::Behaviour(BehaviourEvent::RelayServer(event)) => { - debug!("Unhandled relay server event: {event:?}") - } - SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { - debug!("Unhandled relay client event: {event:?}") - } - SwarmEvent::Behaviour(BehaviourEvent::Autonat(event)) => { - match event { - autonat::Event::StatusChanged { old, new } => { - trace!("NAT status changed from {:?} to {:?}", old, new); - - if let Some(addr) = external_circuit_addr.clone() { - trace!("Adding external relayed listen address: {}", addr); - swarm.add_external_address(addr, AddressScore::Finite(1)); - - if let Some(rendezvous_peer_id) = network_config.rendezvous_peer_id - { - // Invoke registration of relayed client address with the rendezvous server - if let Some(rendezvous_client) = - swarm.behaviour_mut().rendezvous_client.as_mut() - { - rendezvous_client.register( - rendezvous::Namespace::from_static(NODE_NAMESPACE), - rendezvous_peer_id, - None, - ); - } - } - } - } - autonat::Event::InboundProbe(_) | autonat::Event::OutboundProbe(_) => (), } + autonat::Event::InboundProbe(_) | autonat::Event::OutboundProbe(_) => (), } - SwarmEvent::Behaviour(BehaviourEvent::Limits(event)) => { - debug!("Unhandled connection limit event: {event:?}") - } - event => debug!("Unhandled swarm event: {event:?}"), } - } - }); - - info!("Network service is ready"); + SwarmEvent::Behaviour(BehaviourEvent::Limits(event)) => { + debug!("Unhandled connection limit event: {event:?}") + } - if tx_ready.send(()).is_err() { - warn!("No subscriber informed about network service being ready"); - }; + // ~~~~~~~~~~~ + // Replication + // ~~~~~~~~~~~ + SwarmEvent::Behaviour(BehaviourEvent::Replication(event)) => match event { + replication::Event::MessageReceived(peer_id, message) => { + debug!("Swarm received replication message: {message:?}"); + } + }, - // Wait until we received the application shutdown signal or handle closed - tokio::select! 
{ - _ = handle => (), - _ = shutdown => (), + // ~~~~~~~ + // Unknown + // ~~~~~~~ + event => debug!("Unhandled swarm event: {event:?}"), + } } - - Ok(()) } From 3c116638c2a8afd9856f665eaffba26e55a711ea Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 10:49:30 +0200 Subject: [PATCH 009/126] Add new service message types to enum --- aquadoggo/src/bus.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index ccee7719b..fbbaf0ffa 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -1,8 +1,10 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use libp2p::PeerId; use p2panda_rs::operation::OperationId; use crate::manager::Sender; +use crate::replication::SyncMessage; /// Sender for cross-service communication bus. pub type ServiceSender = Sender; @@ -12,4 +14,13 @@ pub type ServiceSender = Sender; pub enum ServiceMessage { /// A new operation arrived at the node. NewOperation(OperationId), + + /// Node established a bi-directional connection to another node. + ConnectionEstablished(PeerId), + + /// Node sent a message to remote node for replication. + SentReplicationMessage(PeerId, SyncMessage), + + /// Node received a message from remote node for replication. + ReceivedReplicationMessage(PeerId, SyncMessage), } From 9a67a461286a2ff8eba02a2d0924df9cd4c15613 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 10:49:53 +0200 Subject: [PATCH 010/126] Better method name and structure for event loop --- aquadoggo/src/network/mod.rs | 4 +- aquadoggo/src/network/service.rs | 97 +++++++++++++++++++++----------- 2 files changed, 66 insertions(+), 35 deletions(-) diff --git a/aquadoggo/src/network/mod.rs b/aquadoggo/src/network/mod.rs index 1af29360f..dcb56bf17 100644 --- a/aquadoggo/src/network/mod.rs +++ b/aquadoggo/src/network/mod.rs @@ -3,12 +3,10 @@ mod behaviour; mod config; mod identity; +mod replication; mod service; mod swarm; mod transport; -// @TODO: Remove this as soon as we integrated it into the libp2p swarm -#[allow(dead_code)] -mod replication; pub use config::NetworkConfiguration; pub use service::network_service; diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 847d305eb..9b0fd4e45 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -79,7 +79,7 @@ pub async fn network_service( swarm.dial(addr)?; } - // Spawn a task to run swarm + // Spawn a task to run swarm in event loop let event_loop = EventLoop::new(swarm, tx, external_circuit_addr, network_config); let handle = tokio::spawn(event_loop.run()); @@ -126,10 +126,10 @@ impl EventLoop { loop { tokio::select! 
{ event = self.swarm.next() => { - self.handle_swarm_event(event.expect("Swarm stream to be infinite.")).await + self.handle_swarm_event(event.expect("Swarm stream to be infinite")).await } event = self.rx.next() => match event { - Some(Ok(message)) => self.handle_incoming_message(message).await, + Some(Ok(message)) => self.handle_service_message(message).await, Some(Err(err)) => { // @TODO } @@ -140,40 +140,19 @@ impl EventLoop { } } - async fn handle_incoming_message(&mut self, message: ServiceMessage) {} + async fn handle_service_message(&mut self, message: ServiceMessage) { + // @TODO + } async fn handle_swarm_event( &mut self, event: SwarmEvent, ) { match event { - SwarmEvent::Behaviour(BehaviourEvent::Mdns(event)) => match event { - mdns::Event::Discovered(list) => { - for (peer, multiaddr) in list { - debug!("mDNS discovered a new peer: {peer}"); - - if let Err(err) = self.swarm.dial(multiaddr) { - warn!("Failed to dial: {}", err); - } - } - } - mdns::Event::Expired(list) => { - for (peer, _multiaddr) in list { - trace!("mDNS peer has expired: {peer}"); - } - } - }, - SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { peer, result: _ })) => { - debug!("Ping from: {peer}") - } - SwarmEvent::ConnectionClosed { - peer_id, - endpoint, - num_established, - cause, - } => { - info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}") - } + // ~~~~~ + // Swarm + // ~~~~~ + SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), SwarmEvent::ConnectionEstablished { peer_id, endpoint, @@ -200,7 +179,14 @@ impl EventLoop { } } } - SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), + SwarmEvent::ConnectionClosed { + peer_id, + endpoint, + num_established, + cause, + } => { + info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}") + } SwarmEvent::ExpiredListenAddr { listener_id, address, @@ -232,6 +218,37 @@ impl EventLoop { SwarmEvent::OutgoingConnectionError { peer_id, error } => { warn!("OutgoingConnectionError: {peer_id:?} {error:?}") } + + // ~~~~ + // mDNS + // ~~~~ + SwarmEvent::Behaviour(BehaviourEvent::Mdns(event)) => match event { + mdns::Event::Discovered(list) => { + for (peer, multiaddr) in list { + debug!("mDNS discovered a new peer: {peer}"); + + if let Err(err) = self.swarm.dial(multiaddr) { + warn!("Failed to dial: {}", err); + } + } + } + mdns::Event::Expired(list) => { + for (peer, _multiaddr) in list { + trace!("mDNS peer has expired: {peer}"); + } + } + }, + + // ~~~~ + // Ping + // ~~~~ + SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { peer, result: _ })) => { + debug!("Ping from: {peer}") + } + + // ~~~~~~~~~~ + // Rendezvous + // ~~~~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::RendezvousClient(event)) => match event { rendezvous::client::Event::Registered { namespace, @@ -293,6 +310,10 @@ impl EventLoop { } other => trace!("Unhandled rendezvous server event: {other:?}"), }, + + // ~~~~~~~~ + // Identify + // ~~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Identify(event)) => { match event { identify::Event::Received { peer_id, .. 
} => { @@ -325,12 +346,20 @@ impl EventLoop { } } } + + // ~~~~~ + // Relay + // ~~~~~ SwarmEvent::Behaviour(BehaviourEvent::RelayServer(event)) => { debug!("Unhandled relay server event: {event:?}") } SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { debug!("Unhandled relay client event: {event:?}") } + + // ~~~~~~~ + // AutoNAT + // ~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Autonat(event)) => { match event { autonat::Event::StatusChanged { old, new } => { @@ -359,6 +388,10 @@ impl EventLoop { autonat::Event::InboundProbe(_) | autonat::Event::OutboundProbe(_) => (), } } + + // ~~~~~~ + // Limits + // ~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Limits(event)) => { debug!("Unhandled connection limit event: {event:?}") } From 26d6445aa3cfbcd12ac6b711af7b1d076281cf5f Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 11:01:55 +0200 Subject: [PATCH 011/126] Send and receive service messages on new or closed connections and replication messages --- aquadoggo/src/bus.rs | 3 +++ aquadoggo/src/network/service.rs | 44 +++++++++++++++++++++++++------- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index fbbaf0ffa..bf95a13ee 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -18,6 +18,9 @@ pub enum ServiceMessage { /// Node established a bi-directional connection to another node. ConnectionEstablished(PeerId), + /// Node closed a connection to another node. + ConnectionClosed(PeerId), + /// Node sent a message to remote node for replication. SentReplicationMessage(PeerId, SyncMessage), diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 9b0fd4e45..939de12e9 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use anyhow::Result; -// use futures::StreamExt; use libp2p::multiaddr::Protocol; use libp2p::ping::Event; use libp2p::swarm::{AddressScore, SwarmEvent}; @@ -122,6 +121,8 @@ impl EventLoop { } } + /// Main event loop handling libp2p swarm events and incoming messages from the service bus as + /// an ongoing async stream. pub async fn run(mut self) { loop { tokio::select! { @@ -131,19 +132,39 @@ impl EventLoop { event = self.rx.next() => match event { Some(Ok(message)) => self.handle_service_message(message).await, Some(Err(err)) => { - // @TODO + panic!("Service bus subscriber for event loop failed: {}", err); } - // Command channel closed, thus shutting down the network event loop. + // Command channel closed, thus shutting down the network event loop None => return, }, } } } + /// Send a message on the communication bus to inform other services. + fn send_service_message(&mut self, message: ServiceMessage) { + if self.tx.send(message).is_err() { + // Silently fail here as we don't care if the message was received at this + // point + } + } + + /// Handle an incoming message via the communication bus from other services. async fn handle_service_message(&mut self, message: ServiceMessage) { - // @TODO + match message { + ServiceMessage::SentReplicationMessage(peer_id, sync_message) => { + self.swarm + .behaviour_mut() + .replication + .send_message(peer_id, sync_message); + } + _ => { + // Ignore all other messages + } + } } + /// Handle an event coming from the libp2p swarm. 
async fn handle_swarm_event( &mut self, event: SwarmEvent, @@ -178,6 +199,9 @@ impl EventLoop { } } } + + // Inform other services about new connection + self.send_service_message(ServiceMessage::ConnectionEstablished(peer_id)); } SwarmEvent::ConnectionClosed { peer_id, @@ -185,13 +209,15 @@ impl EventLoop { num_established, cause, } => { - info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}") + info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}"); + + // Inform other services about closed connection + self.send_service_message(ServiceMessage::ConnectionClosed(peer_id)); } SwarmEvent::ExpiredListenAddr { listener_id, address, } => trace!("ExpiredListenAddr: {listener_id:?} {address}"), - SwarmEvent::IncomingConnection { local_addr, send_back_addr, @@ -400,9 +426,9 @@ impl EventLoop { // Replication // ~~~~~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Replication(event)) => match event { - replication::Event::MessageReceived(peer_id, message) => { - debug!("Swarm received replication message: {message:?}"); - } + replication::Event::MessageReceived(peer_id, message) => self.send_service_message( + ServiceMessage::ReceivedReplicationMessage(peer_id, message), + ), }, // ~~~~~~~ From b0e44eb905ed4db2a95ede2a83ea88fa57fe3221 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 11:20:28 +0200 Subject: [PATCH 012/126] Have peer id on network config struct --- aquadoggo/src/config.rs | 4 ++++ aquadoggo/src/network/config.rs | 19 ++++++++++++++----- aquadoggo/src/network/service.rs | 3 ++- aquadoggo/src/network/swarm.rs | 10 ++++------ aquadoggo/src/replication/service.rs | 10 ++++++++++ 5 files changed, 34 insertions(+), 12 deletions(-) diff --git a/aquadoggo/src/config.rs b/aquadoggo/src/config.rs index 4b651530d..056254d4e 100644 --- a/aquadoggo/src/config.rs +++ b/aquadoggo/src/config.rs @@ -98,6 +98,10 @@ impl Configuration { } }; + // Derive peer id from key pair + let key_pair = NetworkConfiguration::load_or_generate_key_pair(config.base_path)?; + config.network.set_peer_id(&key_pair.public()); + Ok(config) } } diff --git a/aquadoggo/src/network/config.rs b/aquadoggo/src/network/config.rs index e5b1d283f..48415c861 100644 --- a/aquadoggo/src/network/config.rs +++ b/aquadoggo/src/network/config.rs @@ -4,7 +4,7 @@ use std::path::PathBuf; use anyhow::Result; use libp2p::connection_limits::ConnectionLimits; -use libp2p::identity::Keypair; +use libp2p::identity::{Keypair, PublicKey}; use libp2p::{Multiaddr, PeerId}; use log::info; use serde::{Deserialize, Serialize}; @@ -72,8 +72,8 @@ pub struct NetworkConfiguration { /// Ping behaviour enabled. /// - /// Send outbound pings to connected peers every 15 seconds and respond to inbound pings. - /// Every sent ping must yield a response within 20 seconds in order to be successful. + /// Send outbound pings to connected peers every 15 seconds and respond to inbound pings. Every + /// sent ping must yield a response within 20 seconds in order to be successful. pub ping: bool, /// QUIC transport port. @@ -103,6 +103,9 @@ pub struct NetworkConfiguration { /// /// Serve as a rendezvous point for peer discovery, allowing peer registration and queries. pub rendezvous_server_enabled: bool, + + /// Our local peer id. 
+ pub peer_id: Option, } impl Default for NetworkConfiguration { @@ -127,11 +130,17 @@ impl Default for NetworkConfiguration { rendezvous_address: None, rendezvous_peer_id: None, rendezvous_server_enabled: false, + peer_id: None, } } } impl NetworkConfiguration { + /// Derive peer id from a given public key. + pub fn set_peer_id(&mut self, public_key: &PublicKey) { + self.peer_id = Some(PeerId::from_public_key(public_key)); + } + /// Define the connection limits of the swarm. pub fn connection_limits(&self) -> ConnectionLimits { ConnectionLimits::default() @@ -144,8 +153,8 @@ impl NetworkConfiguration { /// Load the key pair from the file at the specified path. /// - /// If the file does not exist, a random key pair is generated and saved. - /// If no path is specified, a random key pair is generated. + /// If the file does not exist, a random key pair is generated and saved. If no path is + /// specified, a random key pair is generated. pub fn load_or_generate_key_pair(path: Option) -> Result { let key_pair = match path { Some(mut path) => { diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 939de12e9..ade96e9cc 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -34,9 +34,10 @@ pub async fn network_service( // Read the network configuration parameters from the application context let network_config = context.config.network.clone(); + let local_peer_id = network_config.peer_id.expect("Peer id needs to be given"); // Build the network swarm and retrieve the local peer ID - let (mut swarm, local_peer_id) = swarm::build_swarm(&network_config, key_pair).await?; + let mut swarm = swarm::build_swarm(&network_config, key_pair).await?; // Define the QUIC multiaddress on which the swarm will listen for connections let quic_multiaddr = diff --git a/aquadoggo/src/network/swarm.rs b/aquadoggo/src/network/swarm.rs index 2891871ce..2a31698ed 100644 --- a/aquadoggo/src/network/swarm.rs +++ b/aquadoggo/src/network/swarm.rs @@ -5,7 +5,6 @@ use std::convert::TryInto; use anyhow::Result; use libp2p::identity::Keypair; use libp2p::swarm::SwarmBuilder; -use libp2p::PeerId; use libp2p::Swarm; use log::info; @@ -16,10 +15,9 @@ use crate::network::NetworkConfiguration; pub async fn build_swarm( network_config: &NetworkConfiguration, key_pair: Keypair, -) -> Result<(Swarm, PeerId)> { - // Read the peer ID (public key) from the key pair - let peer_id = PeerId::from(key_pair.public()); - info!("Network service peer ID: {peer_id}"); +) -> Result> { + let peer_id = network_config.peer_id.expect("Peer id needs to be given"); + info!("Network service peer ID: {peer_id}",); let relay_client_enabled = network_config.relay_address.is_some(); @@ -39,5 +37,5 @@ pub async fn build_swarm( .notify_handler_buffer_size(network_config.notify_handler_buffer_size.try_into()?) 
.build(); - Ok((swarm, peer_id)) + Ok(swarm) } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 786b579ab..37a13605a 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -5,6 +5,7 @@ use anyhow::Result; use crate::bus::ServiceSender; use crate::context::Context; use crate::manager::{ServiceReadySender, Shutdown}; +use crate::replication::{SyncIngest, SyncManager}; pub async fn replication_service( context: Context, @@ -12,5 +13,14 @@ pub async fn replication_service( tx: ServiceSender, tx_ready: ServiceReadySender, ) -> Result<()> { + let local_peer_id = context + .config + .network + .peer_id + .expect("Peer id needs to be given"); + + let ingest = SyncIngest::new(context.schema_provider.clone(), tx); + let manager = SyncManager::new(context.store.clone(), ingest, local_peer_id); + Ok(()) } From e7a9bab30cc4fb9dcecd641bd46f903ec1733e66 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 15:49:04 +0200 Subject: [PATCH 013/126] Introduce connection manager in replication service --- aquadoggo/src/config.rs | 2 +- aquadoggo/src/network/service.rs | 15 +- aquadoggo/src/replication/service.rs | 130 +++++++++++++++++- aquadoggo/src/replication/strategies/naive.rs | 2 +- aquadoggo/src/replication/target_set.rs | 8 +- 5 files changed, 139 insertions(+), 18 deletions(-) diff --git a/aquadoggo/src/config.rs b/aquadoggo/src/config.rs index 056254d4e..228960c69 100644 --- a/aquadoggo/src/config.rs +++ b/aquadoggo/src/config.rs @@ -99,7 +99,7 @@ impl Configuration { }; // Derive peer id from key pair - let key_pair = NetworkConfiguration::load_or_generate_key_pair(config.base_path)?; + let key_pair = NetworkConfiguration::load_or_generate_key_pair(config.base_path.clone())?; config.network.set_peer_id(&key_pair.public()); Ok(config) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index ade96e9cc..035066fd5 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -152,16 +152,11 @@ impl EventLoop { /// Handle an incoming message via the communication bus from other services. 
async fn handle_service_message(&mut self, message: ServiceMessage) { - match message { - ServiceMessage::SentReplicationMessage(peer_id, sync_message) => { - self.swarm - .behaviour_mut() - .replication - .send_message(peer_id, sync_message); - } - _ => { - // Ignore all other messages - } + if let ServiceMessage::SentReplicationMessage(peer_id, sync_message) = message { + self.swarm + .behaviour_mut() + .replication + .send_message(peer_id, sync_message); } } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 37a13605a..288045974 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -1,15 +1,24 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use std::collections::HashMap; + use anyhow::Result; +use libp2p::PeerId; +use log::warn; +use p2panda_rs::schema::SchemaId; +use tokio::sync::broadcast::Receiver; +use tokio::task; -use crate::bus::ServiceSender; +use crate::bus::{ServiceMessage, ServiceSender}; use crate::context::Context; +use crate::db::SqlStore; use crate::manager::{ServiceReadySender, Shutdown}; -use crate::replication::{SyncIngest, SyncManager}; +use crate::replication::{SyncIngest, SyncManager, TargetSet}; +use crate::schema::SchemaProvider; pub async fn replication_service( context: Context, - signal: Shutdown, + shutdown: Shutdown, tx: ServiceSender, tx_ready: ServiceReadySender, ) -> Result<()> { @@ -19,8 +28,119 @@ pub async fn replication_service( .peer_id .expect("Peer id needs to be given"); - let ingest = SyncIngest::new(context.schema_provider.clone(), tx); - let manager = SyncManager::new(context.store.clone(), ingest, local_peer_id); + // Define set of schema ids we are interested in + let supported_schema_ids: Vec = context + .schema_provider + .all() + .await + .iter() + .map(|schema| schema.id().to_owned()) + .collect(); + let target_set = TargetSet::new(&supported_schema_ids); + + // Run a connection manager which deals with the replication logic + let manager = ConnectionManager::new( + &context.schema_provider, + &context.store, + &tx, + local_peer_id, + target_set, + ); + + let handle = task::spawn(manager.run()); + + if tx_ready.send(()).is_err() { + warn!("No subscriber informed about replication service being ready"); + }; + + tokio::select! 
{ + _ = handle => (), + _ = shutdown => { + // @TODO: Wait until all pending replication processes are completed during graceful + // shutdown + } + } Ok(()) } + +struct PeerStatus { + peer_id: PeerId, +} + +struct ConnectionManager { + peers: HashMap, + sync_manager: SyncManager, + tx: ServiceSender, + rx: Receiver, + target_set: TargetSet, +} + +impl ConnectionManager { + pub fn new( + schema_provider: &SchemaProvider, + store: &SqlStore, + tx: &ServiceSender, + local_peer_id: PeerId, + target_set: TargetSet, + ) -> Self { + let ingest = SyncIngest::new(schema_provider.clone(), tx.clone()); + let sync_manager = SyncManager::new(store.clone(), ingest, local_peer_id); + + Self { + peers: HashMap::new(), + sync_manager, + tx: tx.clone(), + rx: tx.subscribe(), + target_set, + } + } + + fn send_service_message(&mut self, message: ServiceMessage) { + if self.tx.send(message).is_err() { + // Silently fail here as we don't care if the message was received at this + // point + } + } + + async fn handle_service_message(&mut self, message: ServiceMessage) { + match message { + ServiceMessage::ConnectionEstablished(peer_id) => { + // @TODO + } + ServiceMessage::ConnectionClosed(peer_id) => { + // @TODO + } + ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { + match self.sync_manager.handle_message(&peer_id, &message).await { + Ok(result) => { + for message in result.messages { + self.send_service_message(ServiceMessage::SentReplicationMessage( + peer_id, message, + )); + } + + if result.is_done { + // @TODO + } + } + Err(err) => { + // @TODO + } + } + } + _ => (), // Ignore all other messages + } + } + + pub async fn run(mut self) { + loop { + match self.rx.recv().await { + Ok(message) => self.handle_service_message(message).await, + Err(err) => { + panic!("Service bus subscriber failed: {}", err); + } + } + } + } +} diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index fd0e04d4f..c592b152b 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -29,7 +29,7 @@ impl NaiveStrategy { let mut result = vec![]; // For every schema id in the target set retrieve log heights for all contributing authors - for schema_id in self.target_set().0.iter() { + for schema_id in self.target_set().iter() { let log_heights = store .get_log_heights(schema_id) .await diff --git a/aquadoggo/src/replication/target_set.rs b/aquadoggo/src/replication/target_set.rs index 86cb76fe8..898802482 100644 --- a/aquadoggo/src/replication/target_set.rs +++ b/aquadoggo/src/replication/target_set.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use std::slice::Iter; + use p2panda_rs::schema::SchemaId; use p2panda_rs::Validate; use serde::{Deserialize, Deserializer, Serialize}; @@ -9,7 +11,7 @@ use crate::replication::errors::TargetSetError; /// De-duplicated and sorted set of schema ids which define the target data for the replication /// session. 
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize)] -pub struct TargetSet(pub Vec); +pub struct TargetSet(Vec); impl TargetSet { pub fn new(schema_ids: &[SchemaId]) -> Self { @@ -40,6 +42,10 @@ impl TargetSet { Ok(target_set) } + + pub fn iter(&self) -> Iter { + self.0.iter() + } } impl Validate for TargetSet { From 93ae241ee17e13d4cb73e2730614f8a24f28b0da Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 15:55:30 +0200 Subject: [PATCH 014/126] Prepare methods for finished or failing sessions --- aquadoggo/src/replication/service.rs | 60 +++++++++++++++++----------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 288045974..cef0e0b0d 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -13,7 +13,8 @@ use crate::bus::{ServiceMessage, ServiceSender}; use crate::context::Context; use crate::db::SqlStore; use crate::manager::{ServiceReadySender, Shutdown}; -use crate::replication::{SyncIngest, SyncManager, TargetSet}; +use crate::replication::errors::ReplicationError; +use crate::replication::{SyncIngest, SyncManager, SyncMessage, TargetSet}; use crate::schema::SchemaProvider; pub async fn replication_service( @@ -66,6 +67,7 @@ pub async fn replication_service( struct PeerStatus { peer_id: PeerId, + sessions: usize, } struct ConnectionManager { @@ -96,43 +98,55 @@ impl ConnectionManager { } } - fn send_service_message(&mut self, message: ServiceMessage) { - if self.tx.send(message).is_err() { - // Silently fail here as we don't care if the message was received at this - // point + async fn on_connection_established(&mut self, peer_id: PeerId) {} + + async fn on_connection_closed(&mut self, peer_id: PeerId) {} + + async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { + match self.sync_manager.handle_message(&peer_id, &message).await { + Ok(result) => { + for message in result.messages { + self.send_service_message(ServiceMessage::SentReplicationMessage( + peer_id, message, + )); + } + + if result.is_done { + self.on_replication_finished(peer_id); + } + } + Err(err) => { + self.on_replication_error(peer_id, err); + } } } + async fn on_replication_finished(&mut self, peer_id: PeerId) {} + + async fn on_replication_error(&mut self, peer_id: PeerId, error: ReplicationError) {} + async fn handle_service_message(&mut self, message: ServiceMessage) { match message { ServiceMessage::ConnectionEstablished(peer_id) => { - // @TODO + self.on_connection_established(peer_id).await; } ServiceMessage::ConnectionClosed(peer_id) => { - // @TODO + self.on_connection_closed(peer_id).await; } ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { - match self.sync_manager.handle_message(&peer_id, &message).await { - Ok(result) => { - for message in result.messages { - self.send_service_message(ServiceMessage::SentReplicationMessage( - peer_id, message, - )); - } - - if result.is_done { - // @TODO - } - } - Err(err) => { - // @TODO - } - } + self.on_replication_message(peer_id, message).await; } _ => (), // Ignore all other messages } } + fn send_service_message(&mut self, message: ServiceMessage) { + if self.tx.send(message).is_err() { + // Silently fail here as we don't care if the message was received at this + // point + } + } + pub async fn run(mut self) { loop { match self.rx.recv().await { From ffd2d09b571f78ba081d61d414f4739f07a49f76 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 16:01:26 +0200 
Subject: [PATCH 015/126] Add and remove peers in connection manager --- aquadoggo/src/replication/manager.rs | 4 ++++ aquadoggo/src/replication/service.rs | 33 ++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index d2e037937..049384546 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -61,6 +61,10 @@ where } } + pub fn remove_sessions(&mut self, remote_peer: &P) { + self.sessions.remove(remote_peer); + } + /// Get all sessions related to a remote peer. fn get_sessions(&self, remote_peer: &P) -> Vec { self.sessions diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index cef0e0b0d..8507abb97 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -70,6 +70,15 @@ struct PeerStatus { sessions: usize, } +impl PeerStatus { + pub fn new(peer_id: &PeerId) -> Self { + Self { + peer_id: peer_id.clone(), + sessions: 0, + } + } +} + struct ConnectionManager { peers: HashMap, sync_manager: SyncManager, @@ -98,9 +107,25 @@ impl ConnectionManager { } } - async fn on_connection_established(&mut self, peer_id: PeerId) {} + fn on_connection_established(&mut self, peer_id: PeerId) { + if self + .peers + .insert(peer_id, PeerStatus::new(&peer_id)) + .is_some() + { + warn!("Duplicate established connection encountered"); + } + } + + fn on_connection_closed(&mut self, peer_id: PeerId) { + // Clear running replication sessions from sync manager + self.sync_manager.remove_sessions(&peer_id); - async fn on_connection_closed(&mut self, peer_id: PeerId) {} + // Remove peer from our connections table + if self.peers.remove(&peer_id).is_none() { + warn!("Tried to remove unknown connection"); + } + } async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { match self.sync_manager.handle_message(&peer_id, &message).await { @@ -128,10 +153,10 @@ impl ConnectionManager { async fn handle_service_message(&mut self, message: ServiceMessage) { match message { ServiceMessage::ConnectionEstablished(peer_id) => { - self.on_connection_established(peer_id).await; + self.on_connection_established(peer_id); } ServiceMessage::ConnectionClosed(peer_id) => { - self.on_connection_closed(peer_id).await; + self.on_connection_closed(peer_id); } ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { self.on_replication_message(peer_id, message).await; From a113ac48d28d3d5487aaf4d23184f9c541e47403 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 16:07:54 +0200 Subject: [PATCH 016/126] Count failed and successful sessions --- aquadoggo/src/replication/service.rs | 34 ++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 8507abb97..2ad94d432 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -67,14 +67,18 @@ pub async fn replication_service( struct PeerStatus { peer_id: PeerId, - sessions: usize, + active_sessions: usize, + successful_count: usize, + failed_count: usize, } impl PeerStatus { pub fn new(peer_id: &PeerId) -> Self { Self { peer_id: peer_id.clone(), - sessions: 0, + active_sessions: 0, + successful_count: 0, + failed_count: 0, } } } @@ -146,9 +150,31 @@ impl ConnectionManager { } } - async fn on_replication_finished(&mut self, peer_id: PeerId) {} + fn on_replication_finished(&mut self, 
peer_id: PeerId) { + match self.peers.get_mut(&peer_id) { + Some(status) => { + status.successful_count += 1; + status.active_sessions -= 1; + } + None => { + panic!("Tried to access unknown peer"); + } + } + } - async fn on_replication_error(&mut self, peer_id: PeerId, error: ReplicationError) {} + fn on_replication_error(&mut self, peer_id: PeerId, _error: ReplicationError) { + match self.peers.get_mut(&peer_id) { + Some(status) => { + status.failed_count += 1; + status.active_sessions -= 1; + } + None => { + panic!("Tried to access unknown peer"); + } + } + + // @TODO: SyncManager should remove session internally on critical errors + } async fn handle_service_message(&mut self, message: ServiceMessage) { match message { From 448e8f1670f2a33a316da08b5d220c9356b993c0 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 16:39:24 +0200 Subject: [PATCH 017/126] Initiate replication with peers --- aquadoggo/src/replication/manager.rs | 6 ++- aquadoggo/src/replication/service.rs | 79 +++++++++++++++++++++++----- aquadoggo/src/replication/session.rs | 14 ++++- 3 files changed, 84 insertions(+), 15 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 049384546..a7f747612 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -61,12 +61,16 @@ where } } + /// Removes all sessions related to a remote peer. + /// + /// Warning: This might also remove actively running sessions. Do only clear sessions when you + /// are sure they are a) done or b) the peer closed its connection. pub fn remove_sessions(&mut self, remote_peer: &P) { self.sessions.remove(remote_peer); } /// Get all sessions related to a remote peer. - fn get_sessions(&self, remote_peer: &P) -> Vec { + pub fn get_sessions(&self, remote_peer: &P) -> Vec { self.sessions .get(remote_peer) // Always return an array, even when it is empty diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 2ad94d432..4c926ad1b 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -14,9 +14,11 @@ use crate::context::Context; use crate::db::SqlStore; use crate::manager::{ServiceReadySender, Shutdown}; use crate::replication::errors::ReplicationError; -use crate::replication::{SyncIngest, SyncManager, SyncMessage, TargetSet}; +use crate::replication::{Mode, Session, SyncIngest, SyncManager, SyncMessage, TargetSet}; use crate::schema::SchemaProvider; +const MAX_SESSIONS_PER_PEER: usize = 3; + pub async fn replication_service( context: Context, shutdown: Shutdown, @@ -67,7 +69,6 @@ pub async fn replication_service( struct PeerStatus { peer_id: PeerId, - active_sessions: usize, successful_count: usize, failed_count: usize, } @@ -76,7 +77,6 @@ impl PeerStatus { pub fn new(peer_id: &PeerId) -> Self { Self { peer_id: peer_id.clone(), - active_sessions: 0, successful_count: 0, failed_count: 0, } @@ -111,7 +111,7 @@ impl ConnectionManager { } } - fn on_connection_established(&mut self, peer_id: PeerId) { + async fn on_connection_established(&mut self, peer_id: PeerId) { if self .peers .insert(peer_id, PeerStatus::new(&peer_id)) @@ -119,9 +119,11 @@ impl ConnectionManager { { warn!("Duplicate established connection encountered"); } + + self.update_sessions().await; } - fn on_connection_closed(&mut self, peer_id: PeerId) { + async fn on_connection_closed(&mut self, peer_id: PeerId) { // Clear running replication sessions from sync manager self.sync_manager.remove_sessions(&peer_id); @@ 
-129,6 +131,8 @@ impl ConnectionManager { if self.peers.remove(&peer_id).is_none() { warn!("Tried to remove unknown connection"); } + + self.update_sessions().await; } async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { @@ -141,32 +145,32 @@ impl ConnectionManager { } if result.is_done { - self.on_replication_finished(peer_id); + self.on_replication_finished(peer_id).await; } } Err(err) => { - self.on_replication_error(peer_id, err); + self.on_replication_error(peer_id, err).await; } } } - fn on_replication_finished(&mut self, peer_id: PeerId) { + async fn on_replication_finished(&mut self, peer_id: PeerId) { match self.peers.get_mut(&peer_id) { Some(status) => { status.successful_count += 1; - status.active_sessions -= 1; } None => { panic!("Tried to access unknown peer"); } } + + self.update_sessions().await; } - fn on_replication_error(&mut self, peer_id: PeerId, _error: ReplicationError) { + async fn on_replication_error(&mut self, peer_id: PeerId, _error: ReplicationError) { match self.peers.get_mut(&peer_id) { Some(status) => { status.failed_count += 1; - status.active_sessions -= 1; } None => { panic!("Tried to access unknown peer"); @@ -174,15 +178,17 @@ impl ConnectionManager { } // @TODO: SyncManager should remove session internally on critical errors + + self.update_sessions().await; } async fn handle_service_message(&mut self, message: ServiceMessage) { match message { ServiceMessage::ConnectionEstablished(peer_id) => { - self.on_connection_established(peer_id); + self.on_connection_established(peer_id).await; } ServiceMessage::ConnectionClosed(peer_id) => { - self.on_connection_closed(peer_id); + self.on_connection_closed(peer_id).await; } ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { self.on_replication_message(peer_id, message).await; @@ -198,6 +204,53 @@ impl ConnectionManager { } } + async fn update_sessions(&mut self) { + // Iterate through all currently connected peers + let attempt_peers: Vec = self + .peers + .iter() + .filter_map(|(peer_id, _peer_status)| { + // Find out how many sessions we know about for each peer + let sessions = self.sync_manager.get_sessions(&peer_id); + let active_sessions: Vec<&Session> = sessions + .iter() + .filter(|session| session.is_done()) + .collect(); + + // Check if we're running too many sessions with that peer already + if active_sessions.len() < MAX_SESSIONS_PER_PEER { + return Some(peer_id.to_owned()); + } + + return None; + }) + .collect(); + + for peer_id in attempt_peers { + self.initiate_replication(&peer_id).await; + } + } + + async fn initiate_replication(&mut self, peer_id: &PeerId) { + match self + .sync_manager + .initiate_session(peer_id, &self.target_set, &Mode::Naive) + .await + { + Ok(messages) => { + for message in messages { + self.send_service_message(ServiceMessage::SentReplicationMessage( + peer_id.clone(), + message, + )); + } + } + Err(_err) => { + // @TODO + } + } + } + pub async fn run(mut self) { loop { match self.rx.recv().await { diff --git a/aquadoggo/src/replication/session.rs b/aquadoggo/src/replication/session.rs index 445d72537..50bd63719 100644 --- a/aquadoggo/src/replication/session.rs +++ b/aquadoggo/src/replication/session.rs @@ -74,10 +74,22 @@ impl Session { } } - pub fn live_mode(&self) -> bool { + pub fn is_live_mode(&self) -> bool { self.is_local_live_mode && self.is_remote_live_mode } + pub fn is_pending(&self) -> bool { + return self.state == SessionState::Pending; + } + + pub fn is_established(&self) -> bool { + return self.state == 
SessionState::Established; + } + + pub fn is_done(&self) -> bool { + return self.state == SessionState::Done; + } + pub fn mode(&self) -> Mode { self.strategy.mode() } From 2e12f3aa51ee625f2ec60a5d45f11480abf5a36f Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 16:43:55 +0200 Subject: [PATCH 018/126] Add some basic logging --- aquadoggo/src/replication/service.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 4c926ad1b..f231f11ee 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use anyhow::Result; use libp2p::PeerId; -use log::warn; +use log::{info, warn}; use p2panda_rs::schema::SchemaId; use tokio::sync::broadcast::Receiver; use tokio::task; @@ -155,6 +155,8 @@ impl ConnectionManager { } async fn on_replication_finished(&mut self, peer_id: PeerId) { + info!("Finished replication with peer {}", peer_id); + match self.peers.get_mut(&peer_id) { Some(status) => { status.successful_count += 1; @@ -167,7 +169,9 @@ impl ConnectionManager { self.update_sessions().await; } - async fn on_replication_error(&mut self, peer_id: PeerId, _error: ReplicationError) { + async fn on_replication_error(&mut self, peer_id: PeerId, error: ReplicationError) { + info!("Replication with peer {} failed: {}", peer_id, error); + match self.peers.get_mut(&peer_id) { Some(status) => { status.failed_count += 1; @@ -238,6 +242,8 @@ impl ConnectionManager { .await { Ok(messages) => { + info!("Initiate replication with peer {}", peer_id); + for message in messages { self.send_service_message(ServiceMessage::SentReplicationMessage( peer_id.clone(), From 00a32ba5392bc39ab49429d8e26b385303b5e048 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 16:48:35 +0200 Subject: [PATCH 019/126] Do not override with default when building config in cli --- aquadoggo_cli/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquadoggo_cli/src/main.rs b/aquadoggo_cli/src/main.rs index 33022784a..49780fde4 100644 --- a/aquadoggo_cli/src/main.rs +++ b/aquadoggo_cli/src/main.rs @@ -129,7 +129,7 @@ impl TryFrom for Configuration { rendezvous_address: cli.rendezvous_address, rendezvous_peer_id, rendezvous_server_enabled: cli.enable_rendezvous_server, - ..NetworkConfiguration::default() + ..config.network }; Ok(config) From f89be7e034fa276a232f9c1cfa14751f9d46b0b2 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 24 May 2023 22:43:02 +0200 Subject: [PATCH 020/126] Fix checking only for certain messages in async loop --- aquadoggo/src/materializer/service.rs | 46 ++++++++++++++------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index 90ac41ac8..c97b88263 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -88,28 +88,30 @@ pub async fn materializer_service( // Listen to incoming new entries and operations and move them into task queue let handle = task::spawn(async move { - while let Ok(ServiceMessage::NewOperation(operation_id)) = rx.recv().await { - // Resolve document id of regarding operation - match context - .store - .get_document_id_by_operation_id(&operation_id) - .await - .unwrap_or_else(|_| { - panic!( - "Failed database query when retreiving document for operation_id {}", - operation_id - ) - }) { - Some(document_id) => { - // Dispatch "reduce" task which 
will materialize the regarding document
-                factory.queue(Task::new("reduce", TaskInput::new(Some(document_id), None)));
-            }
-            None => {
-                // Panic when we couldn't find the regarding document in the database. We can
-                // safely assume that this is due to a critical bug affecting the database
-                // integrity. Panicking here will close `handle` and by that signal a node
-                // shutdown.
-                panic!("Could not find document for operation_id {}", operation_id);
+        loop {
+            if let Ok(ServiceMessage::NewOperation(operation_id)) = rx.recv().await {
+                // Resolve document id of regarding operation
+                match context
+                    .store
+                    .get_document_id_by_operation_id(&operation_id)
+                    .await
+                    .unwrap_or_else(|_| {
+                        panic!(
+                            "Failed database query when retrieving document for operation_id {}",
+                            operation_id
+                        )
+                    }) {
+                    Some(document_id) => {
+                        // Dispatch "reduce" task which will materialize the regarding document
+                        factory.queue(Task::new("reduce", TaskInput::new(Some(document_id), None)));
+                    }
+                    None => {
+                        // Panic when we couldn't find the regarding document in the database. We can
+                        // safely assume that this is due to a critical bug affecting the database
+                        // integrity. Panicking here will close `handle` and by that signal a node
+                        // shutdown.
+                        panic!("Could not find document for operation_id {}", operation_id);
+                    }
                 }
             }
         }

From 2f255e4ec3f9a31f7827bb44026928e87cf22d22 Mon Sep 17 00:00:00 2001
From: adz
Date: Wed, 24 May 2023 22:54:34 +0200
Subject: [PATCH 021/126] Clippy happy, developer happy

---
 aquadoggo/src/network/service.rs     | 15 +++++++++++----
 aquadoggo/src/replication/service.rs | 16 ++++++++++------
 aquadoggo/src/replication/session.rs |  6 +++---
 3 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs
index 035066fd5..39373f394 100644
--- a/aquadoggo/src/network/service.rs
+++ b/aquadoggo/src/network/service.rs
@@ -6,6 +6,7 @@ use libp2p::ping::Event;
 use libp2p::swarm::{AddressScore, SwarmEvent};
 use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr, Swarm};
 use log::{debug, info, trace, warn};
+use tokio::task;
 use tokio_stream::wrappers::BroadcastStream;
 use tokio_stream::StreamExt;

@@ -20,14 +21,17 @@
 use crate::network::NetworkConfiguration;

 /// Network service that configures and deploys a network swarm over QUIC transports.
 ///
-/// The swarm listens for incoming connections, dials remote nodes, manages
-/// connections and executes predefined network behaviours.
+/// The swarm listens for incoming connections, dials remote nodes, manages connections and
+/// executes predefined network behaviours.
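// Editor's aside (not part of the patch): the `let _rx = tx.subscribe();` lines added
// just below and in the replication service keep one receiver alive on the broadcast
// bus, so `send()` calls from other services cannot fail while this service is still
// starting up. A minimal sketch of that tokio behaviour:
//
//     use tokio::sync::broadcast;
//
//     fn broadcast_bus_sketch() {
//         let (tx, _rx) = broadcast::channel::<u8>(16);
//         // One receiver is subscribed, so `send` succeeds and reports the count
//         assert_eq!(tx.send(7).unwrap(), 1);
//         drop(_rx);
//         // Without any receiver the message would be lost and `send` errors
//         assert!(tx.send(7).is_err());
//     }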
pub async fn network_service( context: Context, shutdown: Shutdown, tx: ServiceSender, tx_ready: ServiceReadySender, ) -> Result<()> { + // Subscribe to communication bus + let _rx = tx.subscribe(); + // Load the network key pair and peer ID let key_pair = NetworkConfiguration::load_or_generate_key_pair(context.config.base_path.clone())?; @@ -81,7 +85,7 @@ pub async fn network_service( // Spawn a task to run swarm in event loop let event_loop = EventLoop::new(swarm, tx, external_circuit_addr, network_config); - let handle = tokio::spawn(event_loop.run()); + let handle = task::spawn(event_loop.run()); info!("Network service is ready"); @@ -136,7 +140,10 @@ impl EventLoop { panic!("Service bus subscriber for event loop failed: {}", err); } // Command channel closed, thus shutting down the network event loop - None => return, + None => { + warn!("CLOSED"); + return + }, }, } } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index f231f11ee..a9d8812e6 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use anyhow::Result; use libp2p::PeerId; -use log::{info, warn}; +use log::{info, trace, warn}; use p2panda_rs::schema::SchemaId; use tokio::sync::broadcast::Receiver; use tokio::task; @@ -25,6 +25,9 @@ pub async fn replication_service( tx: ServiceSender, tx_ready: ServiceReadySender, ) -> Result<()> { + // Subscribe to communication bus + let _rx = tx.subscribe(); + let local_peer_id = context .config .network @@ -76,7 +79,7 @@ struct PeerStatus { impl PeerStatus { pub fn new(peer_id: &PeerId) -> Self { Self { - peer_id: peer_id.clone(), + peer_id: *peer_id, successful_count: 0, failed_count: 0, } @@ -202,6 +205,8 @@ impl ConnectionManager { } fn send_service_message(&mut self, message: ServiceMessage) { + trace!("Sending replication message: {:?}", message); + if self.tx.send(message).is_err() { // Silently fail here as we don't care if the message was received at this // point @@ -215,7 +220,7 @@ impl ConnectionManager { .iter() .filter_map(|(peer_id, _peer_status)| { // Find out how many sessions we know about for each peer - let sessions = self.sync_manager.get_sessions(&peer_id); + let sessions = self.sync_manager.get_sessions(peer_id); let active_sessions: Vec<&Session> = sessions .iter() .filter(|session| session.is_done()) @@ -226,7 +231,7 @@ impl ConnectionManager { return Some(peer_id.to_owned()); } - return None; + None }) .collect(); @@ -246,8 +251,7 @@ impl ConnectionManager { for message in messages { self.send_service_message(ServiceMessage::SentReplicationMessage( - peer_id.clone(), - message, + *peer_id, message, )); } } diff --git a/aquadoggo/src/replication/session.rs b/aquadoggo/src/replication/session.rs index 50bd63719..4387d5895 100644 --- a/aquadoggo/src/replication/session.rs +++ b/aquadoggo/src/replication/session.rs @@ -79,15 +79,15 @@ impl Session { } pub fn is_pending(&self) -> bool { - return self.state == SessionState::Pending; + self.state == SessionState::Pending } pub fn is_established(&self) -> bool { - return self.state == SessionState::Established; + self.state == SessionState::Established } pub fn is_done(&self) -> bool { - return self.state == SessionState::Done; + self.state == SessionState::Done } pub fn mode(&self) -> Mode { From 414b866c783a9b980d017d5cc8c274592882f4fe Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 25 May 2023 16:04:06 +0100 Subject: [PATCH 022/126] Make Domain error in IngestError transparent --- 
aquadoggo/src/replication/errors.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index 0451886f4..d8eae22d7 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -31,7 +31,7 @@ pub enum IngestError { #[error("Schema is not supported")] UnsupportedSchema, - #[error("Received entry and operation is invalid")] + #[error(transparent)] Domain(#[from] p2panda_rs::api::DomainError), #[error("Decoding entry failed")] From f60b1b2ea2be5da5f0419bb0fd8202dc5108e1df Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 25 May 2023 16:05:46 +0100 Subject: [PATCH 023/126] Add logging for replication entry exchange --- aquadoggo/src/replication/ingest.rs | 10 +++++ aquadoggo/src/replication/manager.rs | 11 +++++- aquadoggo/src/replication/strategies/diff.rs | 39 +++++++++++++++++-- aquadoggo/src/replication/strategies/naive.rs | 11 ++++++ 4 files changed, 66 insertions(+), 5 deletions(-) diff --git a/aquadoggo/src/replication/ingest.rs b/aquadoggo/src/replication/ingest.rs index e92672c63..da8d18d42 100644 --- a/aquadoggo/src/replication/ingest.rs +++ b/aquadoggo/src/replication/ingest.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use log::debug; +use p2panda_rs::Human; use p2panda_rs::api::validation::{ ensure_document_not_deleted, get_checked_document_id_for_view_id, get_expected_skiplink, is_next_seq_num, validate_claimed_schema_id, @@ -129,6 +131,14 @@ impl SyncIngest { encoded_operation: &EncodedOperation, ) -> Result<(), IngestError> { let entry = decode_entry(encoded_entry)?; + + debug!( + "Received entry {:?} for log {:?} and {}", + entry.seq_num(), + entry.log_id(), + entry.public_key().display() + ); + let plain_operation = decode_operation(encoded_operation)?; let schema = self diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index a7f747612..1d9b80902 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use anyhow::Result; +use log::warn; use p2panda_rs::entry::EncodedEntry; use p2panda_rs::operation::EncodedOperation; @@ -324,7 +325,8 @@ where { session.validate_entry(entry_bytes, operation_bytes.as_ref())?; - self.ingest + let result = self + .ingest .handle_entry( &self.store, entry_bytes, @@ -333,7 +335,12 @@ where .as_ref() .expect("For now we always expect an operation here"), ) - .await?; + .await; + + result.map_err(|err| { + warn!("{:?}", err); + err + })?; Ok(SyncResult { messages: vec![], diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index 0956785bf..7f9446121 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -1,6 +1,10 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use p2panda_rs::entry::{LogId, SeqNum}; +use log::debug; +use p2panda_rs::{ + entry::{LogId, SeqNum}, + Human, +}; use crate::replication::LogHeight; @@ -11,8 +15,18 @@ pub fn diff_log_heights( let mut remote_needs = Vec::new(); for (local_author, local_author_logs) in local_log_heights { + debug!( + "Local log heights: {} {:?}", + local_author.display(), + local_author_logs + ); + // Helper for diffing local log heights against remote log heights. 
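        // Editor's worked example (hypothetical values, not part of the patch): if we
        // hold log 0 up to seq num 8 and the remote reports seq num 5 for the same log,
        // the remote needs entries from seq num 6 (inclusive) onwards:
        //
        //     local:  [(log 0, seq 8)]
        //     remote: [(log 0, seq 5)]  =>  remote needs [(log 0, seq 6)]
        //
        // In code, under the same assumptions:
        //
        //     let needs = SeqNum::new(5).unwrap().next().unwrap();
        //     assert_eq!(needs, SeqNum::new(6).unwrap());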
let diff_logs = |(remote_log_id, remote_seq_num): (LogId, SeqNum)| { + debug!( + "Remote log height: {:?} {:?}", + remote_log_id, remote_seq_num + ); // Get the remote log by it's id. let local_log = local_author_logs .iter() @@ -21,19 +35,32 @@ pub fn diff_log_heights( match local_log { // If a log exists then compare heights of local and remote logs. Some((log_id, local_seq_num)) => { + debug!("Local log height: {:?} {:?}", log_id, local_seq_num); + // If the local log is higher we increment their log id (we want all entries // greater than or equal to this). Otherwise we return none. if local_seq_num > &remote_seq_num { // We can unwrap as we are incrementing the remote peers seq num here and // this means it's will not reach max seq number. + let next_seq_num = remote_seq_num.clone().next().unwrap(); + + debug!( + "Remote needs entries from {:?} - {:?} for {:?}", + local_seq_num, next_seq_num, log_id + ); + Some((log_id.to_owned(), remote_seq_num.clone().next().unwrap())) } else { + debug!("Remote contains all local entries"); None } } // If no log exists then the remote has never had this log and they need all // entries from seq num 1. - None => Some((remote_log_id.to_owned(), SeqNum::default())), + None => { + debug!("Remote needs all entries from {:?}", remote_log_id); + Some((remote_log_id.to_owned(), SeqNum::default())) + } } }; @@ -41,10 +68,16 @@ pub fn diff_log_heights( // // If none is found we don't do anything as this means we are missing entries they should // send us. - if let Some((_, remote_author_logs)) = remote_log_heights + if let Some((remote_author, remote_author_logs)) = remote_log_heights .iter() .find(|(remote_author, _)| remote_author == local_author) { + debug!( + "Remote log heights: {} {:?}", + remote_author.display(), + remote_author_logs + ); + // Diff our local log heights against the remote. let remote_needs_logs: Vec<(LogId, SeqNum)> = remote_author_logs .iter() diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index c592b152b..185b9cf9e 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -2,6 +2,9 @@ use anyhow::Result; use async_trait::async_trait; +use log::debug; +use p2panda_rs::entry::traits::AsEntry; +use p2panda_rs::Human; use crate::db::SqlStore; use crate::replication::errors::ReplicationError; @@ -58,6 +61,14 @@ impl NaiveStrategy { .expect("Fatal database error") .iter() .map(|entry| { + + debug!( + "Prepare message containing entry at {:?} on {:?} for {}", + entry.seq_num(), + entry.log_id(), + entry.public_key().display() + ); + Message::Entry(entry.clone().encoded_entry, entry.payload().cloned()) }) .collect(); From d5612903c5d54095d384a3c1d2febd44dc227208 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 25 May 2023 19:03:25 +0100 Subject: [PATCH 024/126] Sort system schema to the front of TargetSet --- aquadoggo/src/replication/target_set.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/aquadoggo/src/replication/target_set.rs b/aquadoggo/src/replication/target_set.rs index 898802482..f089a60ba 100644 --- a/aquadoggo/src/replication/target_set.rs +++ b/aquadoggo/src/replication/target_set.rs @@ -26,6 +26,17 @@ impl TargetSet { // Sort schema ids to compare target sets easily deduplicated_set.sort(); + // And now sort system schema to the front of the set. 
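        // Editor's note (not part of the patch): `sort_by` is stable, so the
        // lexicographic order produced by the `sort()` above is preserved within the
        // system and application groups; the comparator below only moves system schema
        // ahead of application schema. A hypothetical before/after:
        //
        //     after sort():    [events_0020..., posts_0020..., schema_definition_v1]
        //     after sort_by(): [schema_definition_v1, events_0020..., posts_0020...]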
+        deduplicated_set.sort_by(|schema_id_a, schema_id_b| {
+            let is_system_schema = |schema_id: &SchemaId| -> bool {
+                match schema_id {
+                    SchemaId::Application(_, _) => false,
+                    _ => true,
+                }
+            };
+            is_system_schema(schema_id_b).cmp(&is_system_schema(schema_id_a))
+        });
+
         Self(deduplicated_set)
     }

From 3d7bd86b3d99fb6386413e9ed8d3ef2f742ea727 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Thu, 25 May 2023 19:04:22 +0100
Subject: [PATCH 025/126] Refactor log height diff logic

---
 aquadoggo/src/replication/strategies/diff.rs  | 143 ++++++++++--------
 aquadoggo/src/replication/strategies/naive.rs |  91 ++++++-----
 2 files changed, 136 insertions(+), 98 deletions(-)

diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs
index 7f9446121..da63c0c34 100644
--- a/aquadoggo/src/replication/strategies/diff.rs
+++ b/aquadoggo/src/replication/strategies/diff.rs
@@ -1,16 +1,63 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
+use std::collections::HashMap;
+
 use log::debug;
 use p2panda_rs::{
     entry::{LogId, SeqNum},
+    identity::PublicKey,
     Human,
 };
 
 use crate::replication::LogHeight;
 
+fn remote_requires_entries(
+    log_id: &LogId,
+    local_seq_num: &SeqNum,
+    remote_log_heights: &HashMap<LogId, SeqNum>,
+) -> Option<(LogId, SeqNum)> {
+    debug!("Local log height: {:?} {:?}", log_id, local_seq_num);
+    // Get height of the remote log by its id.
+    let remote_log_height = remote_log_heights.get(log_id);
+
+    match remote_log_height {
+        // If a log exists then compare heights of local and remote logs.
+        Some(remote_seq_num) => {
+            debug!("Remote log height: {:?} {:?}", log_id, remote_seq_num);
+
+            // If the local seq num is higher the remote needs all entries higher than
+            // their max seq num for this log.
+            if local_seq_num > remote_seq_num {
+                // We increment the seq num as we want it to represent an inclusive lower
+                // bound.
+                //
+                // We can unwrap as we are incrementing the lower remote seq num, which
+                // means it will not reach the max seq number.
+                let from_seq_num = remote_seq_num.clone().next().unwrap();
+
+                debug!(
+                    "Remote needs entries from {:?} for {:?}",
+                    from_seq_num, log_id
+                );
+
+                Some((log_id.to_owned(), from_seq_num))
+            } else {
+                debug!("Remote has all entries for {:?}", log_id);
+                None
+            }
+        }
+        // If the remote did not send a height for this log they don't know about it
+        // yet and need all of its entries, starting from the default seq num.
+        None => {
+            debug!("{:?} not found on remote, all entries required", log_id);
+            Some((log_id.to_owned(), SeqNum::default()))
+        }
+    }
+}
+
 pub fn diff_log_heights(
-    local_log_heights: &[LogHeight],
-    remote_log_heights: &[LogHeight],
+    local_log_heights: &HashMap<PublicKey, Vec<(LogId, SeqNum)>>,
+    remote_log_heights: &HashMap<PublicKey, Vec<(LogId, SeqNum)>>,
 ) -> Vec<LogHeight> {
     let mut remote_needs = Vec::new();
 
     for (local_author, local_author_logs) in local_log_heights {
@@ -21,75 +68,37 @@ pub fn diff_log_heights(
         local_author_logs
     );
 
-        // Helper for diffing local log heights against remote log heights.
-        let diff_logs = |(remote_log_id, remote_seq_num): (LogId, SeqNum)| {
-            debug!(
-                "Remote log height: {:?} {:?}",
-                remote_log_id, remote_seq_num
-            );
-            // Get the remote log by it's id.
-            let local_log = local_author_logs
-                .iter()
-                .find(|(local_log_id, _)| remote_log_id == *local_log_id);
-
-            match local_log {
-                // If a log exists then compare heights of local and remote logs.
-                Some((log_id, local_seq_num)) => {
-                    debug!("Local log height: {:?} {:?}", log_id, local_seq_num);
-
-                    // If the local log is higher we increment their log id (we want all entries
-                    // greater than or equal to this). Otherwise we return none.
- if local_seq_num > &remote_seq_num { - // We can unwrap as we are incrementing the remote peers seq num here and - // this means it's will not reach max seq number. - let next_seq_num = remote_seq_num.clone().next().unwrap(); - - debug!( - "Remote needs entries from {:?} - {:?} for {:?}", - local_seq_num, next_seq_num, log_id - ); - - Some((log_id.to_owned(), remote_seq_num.clone().next().unwrap())) - } else { - debug!("Remote contains all local entries"); - None - } - } - // If no log exists then the remote has never had this log and they need all - // entries from seq num 1. - None => { - debug!("Remote needs all entries from {:?}", remote_log_id); - Some((remote_log_id.to_owned(), SeqNum::default())) - } - } - }; - - // Find local log for a public key sent by the remote peer. + // Find all logs for a public key sent by the remote peer. // // If none is found we don't do anything as this means we are missing entries they should // send us. - if let Some((remote_author, remote_author_logs)) = remote_log_heights - .iter() - .find(|(remote_author, _)| remote_author == local_author) - { + if let Some(remote_author_logs) = remote_log_heights.get(&local_author) { + let remote_author_logs: HashMap = + remote_author_logs.to_owned().into_iter().collect(); + debug!( "Remote log heights: {} {:?}", - remote_author.display(), + local_author.display(), remote_author_logs ); - // Diff our local log heights against the remote. - let remote_needs_logs: Vec<(LogId, SeqNum)> = remote_author_logs - .iter() - .copied() - .filter_map(diff_logs) - .collect(); + let mut remote_needs_logs = vec![]; + + for (log_id, seq_num) in local_author_logs { + if let Some(from_log_height) = + remote_requires_entries(log_id, seq_num, &remote_author_logs) + { + remote_needs_logs.push(from_log_height) + }; + } // If the remote needs at least one log we push it to the remote needs. if !remote_needs_logs.is_empty() { remote_needs.push((local_author.to_owned(), remote_needs_logs)); }; } else { + // The author we know about locally wasn't found on the remote log heights so they + // need everything we have. 
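            // Editor's illustration (hypothetical values, not part of the patch): if we
            // hold [(log 0, seq 4)] for an author the remote did not mention at all, the
            // remote needs that whole log from `SeqNum::default()`, i.e. from the first
            // entry onwards.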
remote_needs.push(( local_author.to_owned(), local_author_logs @@ -130,8 +139,14 @@ mod tests { ], )]; - let peer_b_needs = diff_log_heights(&peer_a_log_heights, &peer_b_log_heights); - let peer_a_needs = diff_log_heights(&peer_b_log_heights, &peer_a_log_heights); + let peer_b_needs = diff_log_heights( + &peer_a_log_heights.clone().into_iter().collect(), + &peer_b_log_heights.clone().into_iter().collect(), + ); + let peer_a_needs = diff_log_heights( + &peer_b_log_heights.into_iter().collect(), + &peer_a_log_heights.into_iter().collect(), + ); assert_eq!( peer_a_needs, @@ -152,8 +167,14 @@ mod tests { )]; let peer_b_log_heights = vec![]; - let peer_b_needs = diff_log_heights(&peer_a_log_heights, &peer_b_log_heights); - let peer_a_needs = diff_log_heights(&peer_b_log_heights, &peer_a_log_heights); + let peer_b_needs = diff_log_heights( + &peer_a_log_heights.clone().into_iter().collect(), + &peer_b_log_heights.clone().into_iter().collect(), + ); + let peer_a_needs = diff_log_heights( + &peer_b_log_heights.into_iter().collect(), + &peer_a_log_heights.into_iter().collect(), + ); assert_eq!(peer_a_needs, vec![]); assert_eq!( diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index 185b9cf9e..36379f140 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -1,9 +1,15 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use std::collections::HashMap; + use anyhow::Result; use async_trait::async_trait; -use log::debug; +use log::{debug, info}; use p2panda_rs::entry::traits::AsEntry; +use p2panda_rs::entry::{LogId, SeqNum}; +use p2panda_rs::identity::PublicKey; +use p2panda_rs::schema::SchemaId; +use p2panda_rs::test_utils::fixtures::public_key; use p2panda_rs::Human; use crate::db::SqlStore; @@ -28,19 +34,18 @@ impl NaiveStrategy { } } - async fn local_log_heights(&self, store: &SqlStore) -> Vec { - let mut result = vec![]; - + async fn local_log_heights( + &self, + store: &SqlStore, + schema_id: &SchemaId, + ) -> HashMap> { // For every schema id in the target set retrieve log heights for all contributing authors - for schema_id in self.target_set().iter() { - let log_heights = store - .get_log_heights(schema_id) - .await - .expect("Fatal database error"); - result.extend(log_heights); - } - - result + store + .get_log_heights(schema_id) + .await + .expect("Schema in target set not found in database") + .into_iter() + .collect() } async fn entry_responses( @@ -50,29 +55,38 @@ impl NaiveStrategy { ) -> Vec { let mut messages = Vec::new(); - let local_log_heights = self.local_log_heights(store).await; - let remote_needs = diff_log_heights(&local_log_heights, remote_log_heights); - - for (public_key, log_heights) in remote_needs { - for (log_id, seq_num) in log_heights { - let entry_messages: Vec = store - .get_entries_from(&public_key, &log_id, &seq_num) - .await - .expect("Fatal database error") - .iter() - .map(|entry| { - - debug!( - "Prepare message containing entry at {:?} on {:?} for {}", - entry.seq_num(), - entry.log_id(), - entry.public_key().display() - ); - - Message::Entry(entry.clone().encoded_entry, entry.payload().cloned()) - }) - .collect(); - messages.extend(entry_messages); + for schema_id in self.target_set().iter() { + info!( + "Comparing local and remote state for {}", + schema_id.display() + ); + + let local_log_heights = self.local_log_heights(store, schema_id).await; + let remote_needs = diff_log_heights( + &local_log_heights, + 
&remote_log_heights.to_owned().into_iter().collect(), + ); + + for (public_key, log_heights) in remote_needs { + for (log_id, seq_num) in log_heights { + let entry_messages: Vec = store + .get_entries_from(&public_key, &log_id, &seq_num) + .await + .expect("Fatal database error") + .iter() + .map(|entry| { + debug!( + "Prepare message containing entry at {:?} on {:?} for {}", + entry.seq_num(), + entry.log_id(), + entry.public_key().display() + ); + + Message::Entry(entry.clone().encoded_entry, entry.payload().cloned()) + }) + .collect(); + messages.extend(entry_messages); + } } } @@ -91,7 +105,10 @@ impl Strategy for NaiveStrategy { } async fn initial_messages(&mut self, store: &SqlStore) -> StrategyResult { - let log_heights = self.local_log_heights(store).await; + let mut log_heights = vec![]; + for schema_id in self.target_set().iter() { + log_heights.extend(self.local_log_heights(store, schema_id).await) + } self.sent_have = true; StrategyResult { From df90bbb4b9d1403a1155357ef5834907bc94ae16 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 25 May 2023 20:07:15 +0100 Subject: [PATCH 026/126] Don't diff over schema sub-range of target set --- aquadoggo/src/replication/strategies/diff.rs | 5 +- aquadoggo/src/replication/strategies/naive.rs | 88 +++++++++---------- 2 files changed, 44 insertions(+), 49 deletions(-) diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index da63c0c34..fdd0e591f 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -62,6 +62,9 @@ pub fn diff_log_heights( let mut remote_needs = Vec::new(); for (local_author, local_author_logs) in local_log_heights { + let local_author_logs: HashMap = + local_author_logs.to_owned().into_iter().collect(); + debug!( "Local log heights: {} {:?}", local_author.display(), @@ -86,7 +89,7 @@ pub fn diff_log_heights( for (log_id, seq_num) in local_author_logs { if let Some(from_log_height) = - remote_requires_entries(log_id, seq_num, &remote_author_logs) + remote_requires_entries(&log_id, &seq_num, &remote_author_logs) { remote_needs_logs.push(from_log_height) }; diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index 36379f140..ed7c6ea3b 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -34,18 +34,20 @@ impl NaiveStrategy { } } - async fn local_log_heights( - &self, - store: &SqlStore, - schema_id: &SchemaId, - ) -> HashMap> { - // For every schema id in the target set retrieve log heights for all contributing authors - store - .get_log_heights(schema_id) - .await - .expect("Schema in target set not found in database") - .into_iter() - .collect() + async fn local_log_heights(&self, store: &SqlStore) -> Vec<(PublicKey, Vec<(LogId, SeqNum)>)> { + let mut log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = vec![]; + + for schema_id in self.target_set().iter() { + // For every schema id in the target set retrieve log heights for all contributing authors + let logs = store + .get_log_heights(schema_id) + .await + .expect("Schema in target set not found in database") + .into_iter(); + + log_heights.extend(logs); + } + log_heights } async fn entry_responses( @@ -55,38 +57,31 @@ impl NaiveStrategy { ) -> Vec { let mut messages = Vec::new(); - for schema_id in self.target_set().iter() { - info!( - "Comparing local and remote state for {}", - schema_id.display() - ); - - let local_log_heights = 
self.local_log_heights(store, schema_id).await; - let remote_needs = diff_log_heights( - &local_log_heights, - &remote_log_heights.to_owned().into_iter().collect(), - ); - - for (public_key, log_heights) in remote_needs { - for (log_id, seq_num) in log_heights { - let entry_messages: Vec = store - .get_entries_from(&public_key, &log_id, &seq_num) - .await - .expect("Fatal database error") - .iter() - .map(|entry| { - debug!( - "Prepare message containing entry at {:?} on {:?} for {}", - entry.seq_num(), - entry.log_id(), - entry.public_key().display() - ); - - Message::Entry(entry.clone().encoded_entry, entry.payload().cloned()) - }) - .collect(); - messages.extend(entry_messages); - } + let local_log_heights = self.local_log_heights(store).await; + let remote_needs = diff_log_heights( + &local_log_heights.to_owned().into_iter().collect(), + &remote_log_heights.to_owned().into_iter().collect(), + ); + + for (public_key, log_heights) in remote_needs { + for (log_id, seq_num) in log_heights { + let entry_messages: Vec = store + .get_entries_from(&public_key, &log_id, &seq_num) + .await + .expect("Fatal database error") + .iter() + .map(|entry| { + debug!( + "Prepare message containing entry at {:?} on {:?} for {}", + entry.seq_num(), + entry.log_id(), + entry.public_key().display() + ); + + Message::Entry(entry.clone().encoded_entry, entry.payload().cloned()) + }) + .collect(); + messages.extend(entry_messages); } } @@ -105,10 +100,7 @@ impl Strategy for NaiveStrategy { } async fn initial_messages(&mut self, store: &SqlStore) -> StrategyResult { - let mut log_heights = vec![]; - for schema_id in self.target_set().iter() { - log_heights.extend(self.local_log_heights(store, schema_id).await) - } + let log_heights = self.local_log_heights(store).await; self.sent_have = true; StrategyResult { From 72140839efdde6e4445db6c703f042abfaa2e138 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 13:05:43 +0100 Subject: [PATCH 027/126] Introduce DuplicateSessionRequestError --- aquadoggo/src/replication/errors.rs | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index d8eae22d7..727d77417 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -2,16 +2,15 @@ use thiserror::Error; +use crate::replication::TargetSet; + #[derive(Error, Debug)] pub enum ReplicationError { #[error("Remote peer requested unsupported replication mode")] UnsupportedMode, - #[error("Tried to initialise duplicate inbound replication session with id {0}")] - DuplicateInboundRequest(u64), - - #[error("Tried to initialise duplicate outbound replication session with id {0}")] - DuplicateOutboundRequest(u64), + #[error("Duplicate session error: {0}")] + DuplicateSession(#[from] DuplicateSessionRequestError), #[error("No session found with id {0}")] NoSessionFound(u64), @@ -27,6 +26,7 @@ pub enum ReplicationError { } #[derive(Error, Debug)] +#[error(transparent)] pub enum IngestError { #[error("Schema is not supported")] UnsupportedSchema, @@ -49,3 +49,21 @@ pub enum TargetSetError { #[error("Target set contains unsorted or duplicate schema ids")] UnsortedSchemaIds, } + +#[derive(Error, Debug)] +pub enum DuplicateSessionRequestError { + #[error("Tried to initialise duplicate inbound replication for already established session with id {0}")] + InboundEstablishedSession(u64), + + #[error("Tried to initialise duplicate inbound replication for completed session with id 
{0}")] + InboundDoneSession(u64), + + #[error("Tried to initialise duplicate inbound replication session for existing target set {0:?}")] + InboundExistingTargetSet(TargetSet), + + #[error("Tried to initialise duplicate outbound replication session for existing target set {0:?}")] + OutboundExistingTargetSet(TargetSet), + + #[error("Tried to initialise duplicate outbound replication session with id {0}")] + Outbound(u64), +} From f96865aa145e8976d6d06e1a1fa422cfe1193c07 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 13:05:54 +0100 Subject: [PATCH 028/126] More logging and use new error type --- aquadoggo/src/replication/manager.rs | 51 ++++++++++++++++++---------- aquadoggo/src/replication/service.rs | 15 ++++---- 2 files changed, 42 insertions(+), 24 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 1d9b80902..e1e39656b 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -3,12 +3,12 @@ use std::collections::HashMap; use anyhow::Result; -use log::warn; +use log::{debug, warn, info}; use p2panda_rs::entry::EncodedEntry; use p2panda_rs::operation::EncodedOperation; use crate::db::SqlStore; -use crate::replication::errors::ReplicationError; +use crate::replication::errors::{DuplicateSessionRequestError, ReplicationError}; use crate::replication::{ Message, Mode, Session, SessionId, SessionState, SyncIngest, SyncMessage, TargetSet, }; @@ -51,7 +51,7 @@ pub struct SyncManager
<P> {
 
 impl<P> SyncManager<P>
where - P: Clone + std::hash::Hash + Eq + PartialOrd, + P: Clone + std::fmt::Debug + std::hash::Hash + Eq + PartialOrd, { pub fn new(store: SqlStore, ingest: SyncIngest, local_peer: P) -> Self { Self { @@ -152,14 +152,17 @@ where let sessions = self.get_sessions(remote_peer); + info!("Initiate outbound replication session with peer {:?}", remote_peer); + // Make sure to not have duplicate sessions over the same schema ids let session = sessions .iter() .find(|session| session.target_set() == *target_set); - if let Some(session) = session { - return Err(ReplicationError::DuplicateOutboundRequest(session.id)); - } + match session { + Some(session) => Err(DuplicateSessionRequestError::OutboundExistingTargetSet(session.target_set())), + None => Ok(()), + }?; // Determine next id for upcoming session let session_id = { @@ -195,14 +198,17 @@ where self.remove_session(remote_peer, &session.id); // Accept the inbound request - true + Ok(true) } else { // Keep our pending session, ignore inbound request - false + Ok(false) } } - _ => return Err(ReplicationError::DuplicateInboundRequest(session.id)), - }; + SessionState::Established => Err( + DuplicateSessionRequestError::InboundEstablishedSession(session.id), + ), + SessionState::Done => Err(DuplicateSessionRequestError::InboundDoneSession(session.id)), + }?; let mut all_messages: Vec = vec![]; @@ -245,6 +251,8 @@ where let sessions = self.get_sessions(remote_peer); + info!("Initiate inbound replication session with peer {:?}", remote_peer); + // Check if a session with this id already exists for this peer, this can happen if both // peers started to initiate a session at the same time, we can try to resolve this if let Some(session) = sessions @@ -258,12 +266,15 @@ where // Check if a session with this target set already exists for this peer, this always gets // rejected because it is clearly redundant - if let Some(session) = sessions + match sessions .iter() .find(|session| session.target_set() == *target_set) { - return Err(ReplicationError::DuplicateInboundRequest(session.id)); - } + Some(session) => Err(DuplicateSessionRequestError::InboundExistingTargetSet( + session.target_set(), + )), + None => Ok(()), + }?; let messages = self .insert_and_initialize_session(remote_peer, session_id, target_set, mode, false) @@ -384,7 +395,7 @@ mod tests { use rstest::rstest; use tokio::sync::broadcast; - use crate::replication::errors::ReplicationError; + use crate::replication::errors::{DuplicateSessionRequestError, ReplicationError}; use crate::replication::message::{Message, HAVE_TYPE, SYNC_DONE_TYPE}; use crate::replication::{Mode, SyncIngest, SyncMessage, TargetSet}; use crate::schema::SchemaProvider; @@ -426,7 +437,9 @@ mod tests { .await; assert!(matches!( result, - Err(ReplicationError::DuplicateOutboundRequest(0)) + Err(ReplicationError::DuplicateSession( + DuplicateSessionRequestError::Outbound(0) + )) )); }) } @@ -458,7 +471,9 @@ mod tests { let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; assert!(matches!( result, - Err(ReplicationError::DuplicateInboundRequest(0)) + Err(ReplicationError::DuplicateSession( + DuplicateSessionRequestError::InboundEstablishedSession(0) + )) )); // Reject different session concerning same target set @@ -467,7 +482,9 @@ mod tests { let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; assert!(matches!( result, - Err(ReplicationError::DuplicateInboundRequest(1)) + Err(ReplicationError::DuplicateSession( + DuplicateSessionRequestError::InboundExistingTargetSet(target_set_2) + )) 
)); }) } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index a9d8812e6..cde1a09b2 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use anyhow::Result; use libp2p::PeerId; -use log::{info, trace, warn}; +use log::{info, trace, warn, debug}; use p2panda_rs::schema::SchemaId; use tokio::sync::broadcast::Receiver; use tokio::task; @@ -115,6 +115,7 @@ impl ConnectionManager { } async fn on_connection_established(&mut self, peer_id: PeerId) { + info!("Connection established with peer: {}", peer_id); if self .peers .insert(peer_id, PeerStatus::new(&peer_id)) @@ -128,6 +129,7 @@ impl ConnectionManager { async fn on_connection_closed(&mut self, peer_id: PeerId) { // Clear running replication sessions from sync manager + info!("Connection closed: remove sessions with peer: {}", peer_id); self.sync_manager.remove_sessions(&peer_id); // Remove peer from our connections table @@ -139,6 +141,7 @@ impl ConnectionManager { } async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { + trace!("Received SyncMessage: {:?}", message); match self.sync_manager.handle_message(&peer_id, &message).await { Ok(result) => { for message in result.messages { @@ -205,7 +208,7 @@ impl ConnectionManager { } fn send_service_message(&mut self, message: ServiceMessage) { - trace!("Sending replication message: {:?}", message); + trace!("Sending message on service channel: {:?}", message); if self.tx.send(message).is_err() { // Silently fail here as we don't care if the message was received at this @@ -230,7 +233,7 @@ impl ConnectionManager { if active_sessions.len() < MAX_SESSIONS_PER_PEER { return Some(peer_id.to_owned()); } - + debug!("Max sessions reached for peer: {}", peer_id); None }) .collect(); @@ -247,16 +250,14 @@ impl ConnectionManager { .await { Ok(messages) => { - info!("Initiate replication with peer {}", peer_id); - for message in messages { self.send_service_message(ServiceMessage::SentReplicationMessage( *peer_id, message, )); } } - Err(_err) => { - // @TODO + Err(err) => { + warn!("Replication error: {}", err) } } } From 34de9080ad51c3dafabc2fc0ba8adce0c6abd9ee Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 13:19:36 +0100 Subject: [PATCH 029/126] Logging for dropping and re-initiating duplicate session requests --- aquadoggo/src/replication/manager.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index e1e39656b..97ccb8001 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -195,6 +195,7 @@ where SessionState::Pending => { if &self.local_peer < remote_peer { // Drop our pending session + debug!("Drop pending duplicate session with id {} for peer {:?}", session.id, remote_peer); self.remove_session(remote_peer, &session.id); // Accept the inbound request @@ -213,6 +214,7 @@ where let mut all_messages: Vec = vec![]; if accept_inbound_request { + debug!("Accept duplicate session request with id {} for peer {:?}", session.id, remote_peer); let messages = self .insert_and_initialize_session( remote_peer, @@ -227,6 +229,7 @@ where // If we dropped our own outbound session request regarding a different target set, we // need to re-establish it with another session id, otherwise it would get lost if session.target_set() != *target_set { + debug!("Re-initiate dropped session with target set {:?} for peer {:?}", 
session.target_set(), remote_peer); let messages = self .initiate_session(remote_peer, target_set, &session.mode()) .await?; From 18f7e4886f9e47dbda6b9a491133d758efa9bc32 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 14:32:57 +0100 Subject: [PATCH 030/126] Log when re-initiating session with peer --- aquadoggo/src/replication/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index cde1a09b2..8984f89ee 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -239,6 +239,7 @@ impl ConnectionManager { .collect(); for peer_id in attempt_peers { + debug!("Re-initiate replication with: {}", peer_id); self.initiate_replication(&peer_id).await; } } From 1b350b6e94f447396ca45dc35702dcc6f27fd9cd Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 15:48:25 +0100 Subject: [PATCH 031/126] Fix issue when calculating local log heights --- aquadoggo/src/replication/strategies/naive.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index ed7c6ea3b..09fd73d48 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -34,18 +34,23 @@ impl NaiveStrategy { } } - async fn local_log_heights(&self, store: &SqlStore) -> Vec<(PublicKey, Vec<(LogId, SeqNum)>)> { - let mut log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = vec![]; + async fn local_log_heights(&self, store: &SqlStore) -> HashMap> { + let mut log_heights: HashMap> = HashMap::new(); for schema_id in self.target_set().iter() { // For every schema id in the target set retrieve log heights for all contributing authors - let logs = store + let schema_logs = store .get_log_heights(schema_id) .await .expect("Schema in target set not found in database") .into_iter(); - log_heights.extend(logs); + // Then merge them into any existing records for the author + for (public_key, logs) in schema_logs { + let mut author_logs = log_heights.get(&public_key).cloned().unwrap_or(vec![]); + author_logs.extend(logs); + log_heights.insert(public_key, author_logs); + } } log_heights } @@ -59,7 +64,7 @@ impl NaiveStrategy { let local_log_heights = self.local_log_heights(store).await; let remote_needs = diff_log_heights( - &local_log_heights.to_owned().into_iter().collect(), + &local_log_heights, &remote_log_heights.to_owned().into_iter().collect(), ); @@ -105,7 +110,7 @@ impl Strategy for NaiveStrategy { StrategyResult { is_local_done: log_heights.is_empty(), - messages: vec![Message::Have(log_heights)], + messages: vec![Message::Have(log_heights.into_iter().collect())], } } From 99ad0a87907306893ca6f409872d6b36166eba45 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 17:25:49 +0100 Subject: [PATCH 032/126] More logging in manager --- aquadoggo/src/replication/manager.rs | 44 +++++++++++++++++++++------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 97ccb8001..e484b4b92 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use anyhow::Result; -use log::{debug, warn, info}; +use log::{debug, info, warn}; use p2panda_rs::entry::EncodedEntry; use p2panda_rs::operation::EncodedOperation; @@ -152,7 +152,10 @@ where let sessions = 
self.get_sessions(remote_peer); - info!("Initiate outbound replication session with peer {:?}", remote_peer); + info!( + "Initiate outbound replication session with peer {:?}", + remote_peer + ); // Make sure to not have duplicate sessions over the same schema ids let session = sessions @@ -160,7 +163,9 @@ where .find(|session| session.target_set() == *target_set); match session { - Some(session) => Err(DuplicateSessionRequestError::OutboundExistingTargetSet(session.target_set())), + Some(session) => Err(DuplicateSessionRequestError::OutboundExistingTargetSet( + session.target_set(), + )), None => Ok(()), }?; @@ -195,13 +200,20 @@ where SessionState::Pending => { if &self.local_peer < remote_peer { // Drop our pending session - debug!("Drop pending duplicate session with id {} for peer {:?}", session.id, remote_peer); + debug!( + "Drop pending outbound session and process inbound session request with duplicate id {}", + session.id + ); self.remove_session(remote_peer, &session.id); // Accept the inbound request Ok(true) } else { // Keep our pending session, ignore inbound request + debug!( + "Ignore inbound request and keep pending outbound session with duplicate id {}", + session.id + ); Ok(false) } } @@ -214,7 +226,10 @@ where let mut all_messages: Vec = vec![]; if accept_inbound_request { - debug!("Accept duplicate session request with id {} for peer {:?}", session.id, remote_peer); + debug!( + "Accept duplicate session request with id {} for peer {:?}", + session.id, remote_peer + ); let messages = self .insert_and_initialize_session( remote_peer, @@ -229,7 +244,11 @@ where // If we dropped our own outbound session request regarding a different target set, we // need to re-establish it with another session id, otherwise it would get lost if session.target_set() != *target_set { - debug!("Re-initiate dropped session with target set {:?} for peer {:?}", session.target_set(), remote_peer); + debug!( + "Re-initiate dropped session with target set {:?} for peer {:?}", + session.target_set(), + remote_peer + ); let messages = self .initiate_session(remote_peer, target_set, &session.mode()) .await?; @@ -254,16 +273,20 @@ where let sessions = self.get_sessions(remote_peer); - info!("Initiate inbound replication session with peer {:?}", remote_peer); + info!( + "Initiate inbound replication session with peer {:?}", + remote_peer + ); // Check if a session with this id already exists for this peer, this can happen if both // peers started to initiate a session at the same time, we can try to resolve this - if let Some(session) = sessions + if let Some(existing_session) = sessions .iter() - .find(|session| session.id == *session_id && session.local) + .find(|existing_session| existing_session.id == *session_id && existing_session.local) { + debug!("Handle sync request containing duplicate session id"); return self - .handle_duplicate_session(remote_peer, target_set, session) + .handle_duplicate_session(remote_peer, target_set, existing_session) .await; } @@ -292,6 +315,7 @@ where session_id: &SessionId, message: &Message, ) -> Result { + debug!("Message {message:?} received for session {session_id} with peer {remote_peer:?}"); let sessions = self.sessions.get_mut(remote_peer); let (is_both_done, messages) = match sessions { From 4e0aacf7f4e43bdf3fe70bbfb40089a156aa02d5 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 17:26:13 +0100 Subject: [PATCH 033/126] Improve logging message --- aquadoggo/src/replication/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 8984f89ee..5a5b3c5f2 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -239,7 +239,7 @@ impl ConnectionManager { .collect(); for peer_id in attempt_peers { - debug!("Re-initiate replication with: {}", peer_id); + debug!("Initiate replication with: {}", peer_id); self.initiate_replication(&peer_id).await; } } From c0b6816798d002acadfd1ca7f31eea7d5bb37f35 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 17:26:24 +0100 Subject: [PATCH 034/126] Fix diff test --- aquadoggo/src/replication/strategies/diff.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index fdd0e591f..f9900ecc5 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -185,8 +185,8 @@ mod tests { vec![( author_a, vec![ - (LogId::new(0), SeqNum::new(1).unwrap()), - (LogId::new(1), SeqNum::new(1).unwrap()) + (LogId::new(1), SeqNum::new(1).unwrap()), + (LogId::new(0), SeqNum::new(1).unwrap()) ] ),] ); From 20e21129c3abb1c4116940f8d2621031b01a0584 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 26 May 2023 17:26:46 +0100 Subject: [PATCH 035/126] Correct expect error message --- aquadoggo/src/replication/strategies/naive.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index 09fd73d48..45b5e8d71 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -42,7 +42,7 @@ impl NaiveStrategy { let schema_logs = store .get_log_heights(schema_id) .await - .expect("Schema in target set not found in database") + .expect("Fatal database error") .into_iter(); // Then merge them into any existing records for the author From 22e391e5fdb64bf50443082f7c1064f6290f3a64 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Sat, 27 May 2023 18:56:03 +0100 Subject: [PATCH 036/126] Ignore duplicate inbound sync requests --- aquadoggo/src/replication/errors.rs | 5 ++- aquadoggo/src/replication/manager.rs | 59 ++++++++++++++++------------ 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index 727d77417..d2e26c986 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -50,8 +50,11 @@ pub enum TargetSetError { UnsortedSchemaIds, } -#[derive(Error, Debug)] +#[derive(Error, Debug, PartialEq)] pub enum DuplicateSessionRequestError { + #[error("Remote sent two sync requests for session with id {0}")] + InboundPendingSession(u64), + #[error("Tried to initialise duplicate inbound replication for already established session with id {0}")] InboundEstablishedSession(u64), diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index e484b4b92..7ee157530 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -193,18 +193,27 @@ where &mut self, remote_peer: &P, target_set: &TargetSet, - session: &Session, + existing_session: &Session, ) -> Result { - let accept_inbound_request = match session.state { + match existing_session.local { + // Remote peer sent a sync request for an already pending session, we should ignore + // this second request. 
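            // Editor's note (not part of the patch): `local` marks sessions this peer
            // initiated itself, so an inbound request colliding with a non-local session
            // can only mean the remote sent the same sync request twice, hence the error
            // below.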
+ false => Err(DuplicateSessionRequestError::InboundPendingSession( + existing_session.id, + )), + _ => Ok(()), + }?; + + let accept_inbound_request = match existing_session.state { // Handle only duplicate sessions when they haven't started yet SessionState::Pending => { if &self.local_peer < remote_peer { // Drop our pending session debug!( "Drop pending outbound session and process inbound session request with duplicate id {}", - session.id + existing_session.id ); - self.remove_session(remote_peer, &session.id); + self.remove_session(remote_peer, &existing_session.id); // Accept the inbound request Ok(true) @@ -212,15 +221,17 @@ where // Keep our pending session, ignore inbound request debug!( "Ignore inbound request and keep pending outbound session with duplicate id {}", - session.id + existing_session.id ); Ok(false) } } SessionState::Established => Err( - DuplicateSessionRequestError::InboundEstablishedSession(session.id), + DuplicateSessionRequestError::InboundEstablishedSession(existing_session.id), ), - SessionState::Done => Err(DuplicateSessionRequestError::InboundDoneSession(session.id)), + SessionState::Done => Err(DuplicateSessionRequestError::InboundDoneSession( + existing_session.id, + )), }?; let mut all_messages: Vec = vec![]; @@ -228,29 +239,29 @@ where if accept_inbound_request { debug!( "Accept duplicate session request with id {} for peer {:?}", - session.id, remote_peer + existing_session.id, remote_peer ); let messages = self .insert_and_initialize_session( remote_peer, - &session.id, + &existing_session.id, target_set, - &session.mode(), + &existing_session.mode(), false, ) .await; - all_messages.extend(to_sync_messages(session.id, messages)); + all_messages.extend(to_sync_messages(existing_session.id, messages)); // If we dropped our own outbound session request regarding a different target set, we // need to re-establish it with another session id, otherwise it would get lost - if session.target_set() != *target_set { + if existing_session.target_set() != *target_set { debug!( "Re-initiate dropped session with target set {:?} for peer {:?}", - session.target_set(), + existing_session.target_set(), remote_peer ); let messages = self - .initiate_session(remote_peer, target_set, &session.mode()) + .initiate_session(remote_peer, target_set, &existing_session.mode()) .await?; all_messages.extend(messages) } @@ -279,10 +290,11 @@ where ); // Check if a session with this id already exists for this peer, this can happen if both - // peers started to initiate a session at the same time, we can try to resolve this + // peers started to initiate a session at the same time, or if the remote peer sent two + // sync request messages with the same session id. 
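        // Editor's sketch (not part of the patch): the tie between two concurrent
        // requests with the same session id is broken by comparing peer ids, so both
        // peers reach the same decision without any further coordination. Assuming the
        // ids implement `Ord`:
        //
        //     fn drops_own_pending_session<P: Ord>(local: &P, remote: &P) -> bool {
        //         // The peer with the smaller id gives way: it drops its pending
        //         // outbound session and accepts the inbound request instead
        //         local < remote
        //     }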
if let Some(existing_session) = sessions .iter() - .find(|existing_session| existing_session.id == *session_id && existing_session.local) + .find(|existing_session| existing_session.id == *session_id) { debug!("Handle sync request containing duplicate session id"); return self @@ -475,6 +487,7 @@ mod tests { fn initiate_inbound_session( #[from(random_target_set)] target_set_1: TargetSet, #[from(random_target_set)] target_set_2: TargetSet, + #[from(random_target_set)] target_set_3: TargetSet, ) { test_runner(move |node: TestNode| async move { let (tx, _rx) = broadcast::channel(8); @@ -494,13 +507,11 @@ mod tests { // Reject attempt to create session again let message = - SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set_1.clone())); + SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set_3.clone())); let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; - assert!(matches!( - result, - Err(ReplicationError::DuplicateSession( - DuplicateSessionRequestError::InboundEstablishedSession(0) - )) + println!("{result:?}"); + assert!(matches!(result, + Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::InboundPendingSession(0) )); // Reject different session concerning same target set @@ -509,9 +520,7 @@ mod tests { let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; assert!(matches!( result, - Err(ReplicationError::DuplicateSession( - DuplicateSessionRequestError::InboundExistingTargetSet(target_set_2) - )) + Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::InboundExistingTargetSet(target_set_2) )); }) } From 56718443224ee653154548f34f12238ede37a8a7 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Sun, 28 May 2023 17:59:30 +0100 Subject: [PATCH 037/126] Add messaging diagram to lifetime test --- aquadoggo/src/replication/manager.rs | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 7ee157530..430ddcba4 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -581,6 +581,28 @@ mod tests { PEER_ID_REMOTE, ); + // SyncRequest(0, 0, [..])─────────────────────► + // + // ◄───────────────────────────────── Have([..]) + // + // ◄──────────────────────────── SyncDone(false) + // + // Have([..]) ─────────────────────────────────► + // + // Entry(..) ─────────────────────────────────► + // + // Entry(..) ──────────────────────────────────► + // + // Entry(..) ──────────────────────────────────► + // + // Entry(..) ──────────────────────────────────► + // + // Entry(..) ──────────────────────────────────► + // + // Entry(..) 
──────────────────────────────────► + // + // SyncDone(false) ────────────────────────────► + // Send `SyncRequest` to remote let messages = manager_a .initiate_session(&PEER_ID_REMOTE, &target_set, &Mode::Naive) @@ -595,7 +617,8 @@ mod tests { )] ); - // Receive `Have` and `SyncDone` from remote + // Remote receives `SyncRequest` + // Send `Have` and `SyncDone` messages back to local let result = manager_b .handle_message(&PEER_ID_LOCAL, &messages[0]) .await @@ -610,6 +633,7 @@ mod tests { ] ); + // Receive `Have` and `SyncDone` messages from remote // Send `Have`, `Entry` and `SyncDone` messages to remote let result_have = manager_a .handle_message(&PEER_ID_REMOTE, &result.messages[0]) @@ -633,7 +657,7 @@ mod tests { SYNC_DONE_TYPE ); - // Receive `SyncDone` from remote + // Remote receives `Have`, `Entry` `SyncDone` messages from local for (index, message) in result_have.messages.iter().enumerate() { let result = manager_b .handle_message(&PEER_ID_LOCAL, &message) From c482b185157d3c79df66360136233130702a7c74 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Sun, 28 May 2023 18:16:16 +0100 Subject: [PATCH 038/126] Logging in behaviour --- aquadoggo/src/network/replication/behaviour.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 4762d848d..bdedaccc4 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -9,6 +9,7 @@ use libp2p::swarm::{ THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; +use log::{debug, trace}; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; use crate::replication::SyncMessage; @@ -33,6 +34,7 @@ impl Behaviour { impl Behaviour { pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { + trace!("Notify handler of sent sync message: {peer_id} {message:?}"); self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::Message(message), @@ -41,6 +43,7 @@ impl Behaviour { } fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { + trace!("Notify swarm of received sync message: {peer_id} {message:?}"); self.events .push_back(ToSwarm::GenerateEvent(Event::MessageReceived( *peer_id, message, @@ -60,6 +63,7 @@ impl NetworkBehaviour for Behaviour { _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { + debug!("Replication Behaviour: established inbound connection"); Ok(Handler::new()) } @@ -70,6 +74,7 @@ impl NetworkBehaviour for Behaviour { _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { + debug!("Replication Behaviour: established outbound connection"); Ok(Handler::new()) } @@ -79,6 +84,7 @@ impl NetworkBehaviour for Behaviour { _connection_id: ConnectionId, handler_event: THandlerOutEvent, ) { + debug!("Replication Behaviour: connection handler event"); match handler_event { HandlerOutEvent::Message(message) => { self.handle_received_message(&peer, message); @@ -109,6 +115,7 @@ impl NetworkBehaviour for Behaviour { _params: &mut impl PollParameters, ) -> Poll>> { if let Some(event) = self.events.pop_front() { + trace!("Poll handler: {event:?}"); return Poll::Ready(event); } From 7db4f1c7d7dc11b656f6e3c48abc3a4a5ae09c97 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 13:18:22 +0100 Subject: [PATCH 039/126] Remove re-initiating dropped duplicate sessions if they had a different target set --- aquadoggo/src/replication/manager.rs | 15 
++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 430ddcba4..8627b4f53 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -252,19 +252,8 @@ where .await; all_messages.extend(to_sync_messages(existing_session.id, messages)); - // If we dropped our own outbound session request regarding a different target set, we - // need to re-establish it with another session id, otherwise it would get lost - if existing_session.target_set() != *target_set { - debug!( - "Re-initiate dropped session with target set {:?} for peer {:?}", - existing_session.target_set(), - remote_peer - ); - let messages = self - .initiate_session(remote_peer, target_set, &existing_session.mode()) - .await?; - all_messages.extend(messages) - } + // @TODO: Do we want to re-initiate the dropped session if it was concerning a + // different target set? } Ok(SyncResult { From b035f9a9c99f21099296833224c7db047d568a15 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 13:29:44 +0100 Subject: [PATCH 040/126] Diagram for sync lifetime test --- aquadoggo/src/replication/manager.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 8627b4f53..301ecceba 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -541,6 +541,31 @@ mod tests { }) } + // PEER A PEER B + // + // SyncRequest(0, 0, [..])─────────────────────► + // + // ◄───────────────────────────────── Have([..]) + // + // ┌─────── SyncDone(false) + // │ + // Have([..]) ──────────┼──────────────────────► + // │ + // Entry(..) ──────────┼──────────────────────► + // │ + // Entry(..) ───────────┼──────────────────────► + // │ + // Entry(..) ───────────┼──────────────────────► + // │ + // Entry(..) ───────────┼──────────────────────► + // │ + // Entry(..) ───────────┼──────────────────────► + // │ + // Entry(..) 
───────────┼──────────────────────►
    //                      │
    //  SyncDone(false) ─────┼──────────────────────►
    //                      │
    //  ◄────────────────────┘
    #[rstest]
    fn sync_lifetime(
        #[from(populate_store_config)]

From af82212bd3a0d148fbebc1d88b22c9f73b6009a8 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Mon, 29 May 2023 13:30:30 +0100
Subject: [PATCH 041/126] Test for concurrent sync request handling

---
 aquadoggo/src/replication/manager.rs | 124 +++++++++++++++++++++++++++
 1 file changed, 124 insertions(+)

diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index 301ecceba..a7fc9f3fe 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -514,6 +514,130 @@ mod tests {
        })
    }

    //   PEER A                                 PEER B
    //
    //   SyncRequest(0, 0, ["A"])────────────────────►
    //
    //   ◄─────────────────── SyncRequest(0, 0, ["B"])
    //
    //   ========== PEER A REQUEST DROPPED ===========
    //
    //   Have([..]) ─────────────────────────────────►
    //
    //   Done(false) ───────────┐
    //                          │
    //   ◄──────────────────────┼────────── Have([..])
    //                          │
    //   ◄──────────────────────┼───────── Done(false)
    //                          │
    //                          └────────────────────►
    //
    //   ============== SESSION CLOSED ===============
    #[rstest]
    fn concurrent_requests_different_target_set(
        #[from(random_target_set)] target_set_1: TargetSet,
        #[from(random_target_set)] target_set_2: TargetSet,
    ) {
        test_runner(move |node: TestNode| async move {
            let mode = Mode::Naive;
            let (tx, _rx) = broadcast::channel(8);
            let ingest = SyncIngest::new(SchemaProvider::default(), tx);

            // The local peer id is lower than the remote one, this is important for testing
            // the deterministic handling of concurrent session requests which contain the
            // same session id.
            assert!(PEER_ID_LOCAL < PEER_ID_REMOTE);

            // Local peer A initiates a session with id 0.
            let mut manager_a =
                SyncManager::new(node.context.store.clone(), ingest.clone(), PEER_ID_LOCAL);
            let result = manager_a
                .initiate_session(&PEER_ID_REMOTE, &target_set_1, &mode)
                .await;

            let sync_messages = result.unwrap();
            assert_eq!(sync_messages.len(), 1);
            let sync_request_a = sync_messages[0].clone();

            // Remote peer B initiates a session with id 0.
            let mut manager_b =
                SyncManager::new(node.context.store.clone(), ingest, PEER_ID_REMOTE);
            let result = manager_b
                .initiate_session(&PEER_ID_LOCAL, &target_set_2, &mode)
                .await;

            let sync_messages = result.unwrap();
            assert_eq!(sync_messages.len(), 1);
            let sync_request_b = sync_messages[0].clone();

            // Both peers send and handle the requests concurrently.
            let result = manager_a
                .handle_message(&PEER_ID_REMOTE, &sync_request_b)
                .await;
            let response = result.unwrap();

            // We expect Peer A to drop its pending outbound session and respond to the request
            // from Peer B.
            assert_eq!(response.messages.len(), 2);
            let (have_message_a, done_message_a) =
                (response.messages[0].clone(), response.messages[1].clone());

            let result = manager_b
                .handle_message(&PEER_ID_LOCAL, &sync_request_a)
                .await;
            let response = result.unwrap();

            // We expect Peer B to drop the incoming request from Peer A and simply wait
            // for a response to its original request.
            assert_eq!(response.messages.len(), 0);

            // Both peers have exactly one session running.
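            //
            // (The tie-break exercised above can be read as a tiny decision rule
            // which both peers evaluate identically. A rough sketch, using
            // hypothetical `drop_outbound` and `ignore_inbound` helpers rather
            // than the real API:
            //
            //   if local_peer < remote_peer {
            //       drop_outbound(session_id);  // service the inbound request
            //   } else {
            //       ignore_inbound(session_id); // our outbound request wins
            //   }
            //
            // Because both sides compare the same two peer ids, they always
            // agree on which of the two concurrent sessions survives.)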
+ let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + assert_eq!(manager_a_sessions.len(), 1); + + let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + assert_eq!(manager_b_sessions.len(), 1); + + // Peer B processes the `Have` and `SyncDone` messages from Peer A. + let result = manager_b + .handle_message(&PEER_ID_LOCAL, &have_message_a) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 2); + + // They send their own `Have` and `SyncDone` messages. + let (have_message_b, done_message_b) = + (response.messages[0].clone(), response.messages[1].clone()); + + // Sync done, they send no more messages. + let result = manager_b + .handle_message(&PEER_ID_LOCAL, &done_message_a) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + // Peer A processes both the `Have` and `SyncDone` messages from Peer B and produces + // no new messages. + let result = manager_a + .handle_message(&PEER_ID_REMOTE, &have_message_b) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + let result = manager_a + .handle_message(&PEER_ID_REMOTE, &done_message_b) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + // After processing all messages both peers should have no sessions remaining. + let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + assert_eq!(manager_a_sessions.len(), 0); + + let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + assert_eq!(manager_b_sessions.len(), 0); + }) + } + #[rstest] fn inbound_checks_supported_mode(#[from(random_target_set)] target_set: TargetSet) { test_runner(move |node: TestNode| async move { From 62707434d55adbe8c628b09bec82e751f3d9a867 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 13:31:02 +0100 Subject: [PATCH 042/126] Remove duplicate diagram --- aquadoggo/src/replication/manager.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index a7fc9f3fe..330ecd3ae 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -719,28 +719,6 @@ mod tests { PEER_ID_REMOTE, ); - // SyncRequest(0, 0, [..])─────────────────────► - // - // ◄───────────────────────────────── Have([..]) - // - // ◄──────────────────────────── SyncDone(false) - // - // Have([..]) ─────────────────────────────────► - // - // Entry(..) ─────────────────────────────────► - // - // Entry(..) ──────────────────────────────────► - // - // Entry(..) ──────────────────────────────────► - // - // Entry(..) ──────────────────────────────────► - // - // Entry(..) ──────────────────────────────────► - // - // Entry(..) 
──────────────────────────────────► - // - // SyncDone(false) ────────────────────────────► - // Send `SyncRequest` to remote let messages = manager_a .initiate_session(&PEER_ID_REMOTE, &target_set, &Mode::Naive) From d7e4c22d1344a4a6cc6c4dc159b87069cfbaa3bc Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 13:31:25 +0100 Subject: [PATCH 043/126] Make random target set include more --- aquadoggo/src/test_utils/helpers.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/aquadoggo/src/test_utils/helpers.rs b/aquadoggo/src/test_utils/helpers.rs index 725d88551..3e4726ffe 100644 --- a/aquadoggo/src/test_utils/helpers.rs +++ b/aquadoggo/src/test_utils/helpers.rs @@ -118,8 +118,12 @@ pub fn schema_from_fields(fields: Vec<(&str, OperationValue)>) -> Schema { #[fixture] pub fn random_target_set() -> TargetSet { - let document_view_id = random_document_view_id(); - let schema_id = - SchemaId::new_application(&SchemaName::new("messages").unwrap(), &document_view_id); - TargetSet::new(&[schema_id]) + let system_schema_id = SchemaId::SchemaFieldDefinition(1); + let document_view_id_1 = random_document_view_id(); + let schema_id_1 = + SchemaId::new_application(&SchemaName::new("messages").unwrap(), &document_view_id_1); + let document_view_id_2 = random_document_view_id(); + let schema_id_2 = + SchemaId::new_application(&SchemaName::new("messages").unwrap(), &document_view_id_2); + TargetSet::new(&[system_schema_id, schema_id_1, schema_id_2]) } From 4fe85925be008953876158ec4a25e610f89d9e22 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 13:31:56 +0100 Subject: [PATCH 044/126] Small logging and improved comments --- aquadoggo/src/replication/manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 330ecd3ae..91c507885 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -196,7 +196,7 @@ where existing_session: &Session, ) -> Result { match existing_session.local { - // Remote peer sent a sync request for an already pending session, we should ignore + // Remote peer sent a sync request for an already pending inbound session, we should ignore // this second request. 
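            // (Here `existing_session.local == false` means the existing session
            // was initiated by the remote peer itself, so a second request with
            // the same id is plainly redundant. The remaining arms, outside this
            // hunk, handle the case where our own outbound request collided with
            // theirs.)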
false => Err(DuplicateSessionRequestError::InboundPendingSession(
                existing_session.id,
@@ -339,6 +339,7 @@ where

        // We're done, clean up after ourselves
        if is_both_done {
            debug!("Both peers done, removing session: {session_id:?} {remote_peer:?}");
            self.remove_session(remote_peer, session_id);
        }

@@ -498,7 +499,6 @@ mod tests {
            let message =
                SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set_3.clone()));
            let result = manager.handle_message(&PEER_ID_REMOTE, &message).await;
-            println!("{result:?}");
            assert!(matches!(result,
                Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::InboundPendingSession(0)
            ));

From 7ac01ee3b66b84f863c6e1a59527888e9f1ea9fa Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Mon, 29 May 2023 14:27:37 +0100
Subject: [PATCH 045/126] Elegantly handle concurrent session requests with
 duplicate target set

---
 aquadoggo/src/replication/manager.rs | 215 ++++++++++++++++++++++++++-
 1 file changed, 207 insertions(+), 8 deletions(-)

diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index 91c507885..d6801b133 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -262,6 +262,70 @@ where
        })
    }

    async fn handle_duplicate_target_set(
        &mut self,
        remote_peer: &P,
        session_id: &SessionId,
        mode: &Mode,
        existing_session: &Session,
    ) -> Result<SyncResult, ReplicationError> {
        match existing_session.local {
            // Remote peer sent a sync request for a target set which already has a pending
            // inbound session, we should ignore this second request.
            false => Err(DuplicateSessionRequestError::InboundExistingTargetSet(
                existing_session.target_set(),
            )),
            _ => Ok(()),
        }?;

        let accept_inbound_request = match existing_session.state {
            // Handle only duplicate sessions when they haven't started yet
            SessionState::Pending => {
                if &self.local_peer < remote_peer {
                    // Drop our pending session
                    debug!(
                        "Drop pending outbound session and process inbound session request with duplicate target set {:?}",
                        existing_session.target_set()
                    );
                    self.remove_session(remote_peer, &existing_session.id);

                    // Accept the inbound request
                    Ok(true)
                } else {
                    // Keep our pending session, ignore inbound request
                    debug!(
                        "Ignore inbound request and keep pending outbound session with duplicate target set {:?}",
                        existing_session.target_set()
                    );
                    Ok(false)
                }
            }
            _ => Err(DuplicateSessionRequestError::InboundExistingTargetSet(
                existing_session.target_set(),
            )),
        }?;

        let mut all_messages: Vec<SyncMessage> = vec![];

        if accept_inbound_request {
            let messages = self
                .insert_and_initialize_session(
                    remote_peer,
                    &session_id,
                    &existing_session.target_set(),
                    &mode,
                    false,
                )
                .await;
            all_messages.extend(to_sync_messages(existing_session.id, messages));
        }

        Ok(SyncResult {
            messages: all_messages,
            is_done: false,
        })
    }

    async fn handle_sync_request(
        &mut self,
        remote_peer: &P,
@@ -293,15 +357,15 @@ where

        // Check if a session with this target set already exists for this peer, if it does we
        // handle it as a duplicate session request
-        match sessions
+        if let Some(session) = sessions
            .iter()
            .find(|session| session.target_set() == *target_set)
        {
-            Some(session) => Err(DuplicateSessionRequestError::InboundExistingTargetSet(
-                session.target_set(),
-            )),
-            None => Ok(()),
-        }?;
+            debug!("Handle sync request containing duplicate target set");
+            return self
+                .handle_duplicate_target_set(remote_peer, session_id, mode, session)
+                .await;
+        };

        let messages = self
.insert_and_initialize_session(remote_peer, session_id, target_set, mode, false)

@@ -534,7 +598,7 @@ mod tests {
     // ============== SESSION CLOSED ===============
     #[rstest]
-    fn concurrent_requests_different_target_set(
+    fn concurrent_requests_duplicate_session_ids(
         #[from(random_target_set)] target_set_1: TargetSet,
         #[from(random_target_set)] target_set_2: TargetSet,
     ) {

    //   PEER A                                 PEER B
    //
    //   SyncRequest(0, 0, ["A"])────────────────────►
    //
    //   ◄─────────────────── SyncRequest(0, 1, ["A"])
    //
    //   ========== PEER A REQUEST DROPPED ===========
    //
    //   Have([..]) ─────────────────────────────────►
    //
    //   Done(false) ───────────┐
    //                          │
    //   ◄──────────────────────┼────────── Have([..])
    //                          │
    //   ◄──────────────────────┼───────── Done(false)
    //                          │
    //                          └────────────────────►
    //
    //   ============== SESSION CLOSED ===============
    #[rstest]
    fn concurrent_requests_duplicate_target_set(
        #[from(random_target_set)] target_set_1: TargetSet,
    ) {
        test_runner(move |node: TestNode| async move {
            let mode = Mode::Naive;
            let (tx, _rx) = broadcast::channel(8);
            let ingest = SyncIngest::new(SchemaProvider::default(), tx);

            // The local peer id is lower than the remote one, this is important for testing
            // the deterministic handling of concurrent session requests which concern the
            // same target set.
            assert!(PEER_ID_LOCAL < PEER_ID_REMOTE);

            let mut manager_a =
                SyncManager::new(node.context.store.clone(), ingest.clone(), PEER_ID_LOCAL);

            let mut manager_b =
                SyncManager::new(node.context.store.clone(), ingest, PEER_ID_REMOTE);

            // Local peer A initiates a session with target set A.
            let result = manager_a
                .initiate_session(&PEER_ID_REMOTE, &target_set_1, &mode)
                .await;

            let sync_messages = result.unwrap();
            assert_eq!(sync_messages.len(), 1);
            let sync_request_a = sync_messages[0].clone();

            // Remote peer B initiates a session with a dummy peer just to increment the session
            // id.
            let dummy_peer_id = "some_other_peer";
            let _result = manager_b
                .initiate_session(&dummy_peer_id, &target_set_1, &mode)
                .await;

            // Remote peer B initiates a session with target set A.
            let result = manager_b
                .initiate_session(&PEER_ID_LOCAL, &target_set_1, &mode)
                .await;

            let sync_messages = result.unwrap();
            assert_eq!(sync_messages.len(), 1);
            let sync_request_b = sync_messages[0].clone();

            // Remove the session with the dummy peer again.
            manager_b.remove_sessions(&dummy_peer_id);

            // Both peers send and handle the requests concurrently.
            let result = manager_a
                .handle_message(&PEER_ID_REMOTE, &sync_request_b)
                .await;
            let response = result.unwrap();

            // We expect Peer A to drop its pending outbound session and respond to the request
            // from Peer B.
            assert_eq!(response.messages.len(), 2);
            let (have_message_a, done_message_a) =
                (response.messages[0].clone(), response.messages[1].clone());

            let result = manager_b
                .handle_message(&PEER_ID_LOCAL, &sync_request_a)
                .await;
            let response = result.unwrap();

            // We expect Peer B to drop the incoming request from Peer A and simply wait
            // for a response to its original request.
            assert_eq!(response.messages.len(), 0);

            // Both peers have exactly one session running.
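            //
            // (Unlike the previous test, the two requests here carry different
            // session ids: B's request used id 1 because its session with the
            // dummy peer consumed id 0. The collision is therefore detected via
            // the shared target set rather than via the session id.)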
+ let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + assert_eq!(manager_a_sessions.len(), 1); + + let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + assert_eq!(manager_b_sessions.len(), 1); + + // Peer B processes the `Have` and `SyncDone` messages from Peer A. + let result = manager_b + .handle_message(&PEER_ID_LOCAL, &have_message_a) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 2); + + // They send their own `Have` and `SyncDone` messages. + let (have_message_b, done_message_b) = + (response.messages[0].clone(), response.messages[1].clone()); + + // Sync done, they send no more messages. + let result = manager_b + .handle_message(&PEER_ID_LOCAL, &done_message_a) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + // Peer A processes both the `Have` and `SyncDone` messages from Peer B and produces + // no new messages. + let result = manager_a + .handle_message(&PEER_ID_REMOTE, &have_message_b) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + let result = manager_a + .handle_message(&PEER_ID_REMOTE, &done_message_b) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + // After processing all messages both peers should have no sessions remaining. + let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + assert_eq!(manager_a_sessions.len(), 0); + + let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + assert_eq!(manager_b_sessions.len(), 0); + }) + } + #[rstest] fn inbound_checks_supported_mode(#[from(random_target_set)] target_set: TargetSet) { test_runner(move |node: TestNode| async move { @@ -689,7 +888,7 @@ mod tests { // │ // SyncDone(false) ─────┼──────────────────────► // │ - // ◄────────────────────┘ + // ◄────────────────────┘ #[rstest] fn sync_lifetime( #[from(populate_store_config)] From a2b9493e1a36c223fc72a0b3c8308c3a0830690e Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 20:00:08 +0100 Subject: [PATCH 046/126] Correct validation of TargetSet --- aquadoggo/src/replication/target_set.rs | 55 ++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 5 deletions(-) diff --git a/aquadoggo/src/replication/target_set.rs b/aquadoggo/src/replication/target_set.rs index f089a60ba..c144af8f4 100644 --- a/aquadoggo/src/replication/target_set.rs +++ b/aquadoggo/src/replication/target_set.rs @@ -69,13 +69,48 @@ impl Validate for TargetSet { }; let mut prev_schema_id: Option<&SchemaId> = None; + let mut initial_system_schema = true; + + // We need to validate that: + // - if system schema are included they are first in the list and ordered alphabetically + // - any following application schema are also ordered alphabetically + for (index, schema_id) in self.0.iter().enumerate() { + // If the first schema id is an application schema then no system schema should be + // included and we flip the `initial_system_schema` flag. + if index == 0 { + initial_system_schema = match schema_id { + SchemaId::Application(_, _) => false, + _ => true, + } + } - for schema_id in &self.0 { - // Check if it is sorted, this indirectly also checks against duplicates + // Now validate the order. if let Some(prev) = prev_schema_id { - if prev >= schema_id { - return Err(TargetSetError::UnsortedSchemaIds); - } + match schema_id { + // If current and previous are application schema compare them. 
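+                    // (A valid ordering overall thus looks like the following,
+                    // with application schema hashes shortened for illustration:
+                    //
+                    //   [schema_definition_v1, schema_field_definition_v1,
+                    //    events_00a1…, messages_0f62…].)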
+ SchemaId::Application(_, _) if !initial_system_schema => { + if prev >= schema_id { + return Err(TargetSetError::UnsortedSchemaIds); + } + } + // If the current is an application schema and the previous is a system schema + // flip the `initial_system_schema` flag. + SchemaId::Application(_, _) if initial_system_schema => { + initial_system_schema = false + } + // If the current is a system schema and the `initial_system_schema` flag is + // false then there is an out of order system schema. + _ if !initial_system_schema => { + return Err(TargetSetError::UnsortedSchemaIds); + } + // If current and previous are both system schema then compare them. + _ if initial_system_schema => { + if prev >= schema_id { + return Err(TargetSetError::UnsortedSchemaIds); + } + } + _ => panic!(), + }; } prev_schema_id = Some(schema_id); @@ -105,6 +140,8 @@ mod tests { use p2panda_rs::test_utils::fixtures::random_document_view_id; use rstest::rstest; + use crate::test_utils::helpers::random_target_set; + use super::TargetSet; #[rstest] @@ -151,4 +188,12 @@ mod tests { assert_eq!(result.unwrap_err().to_string(), expected_result.to_string()); } + + #[rstest] + fn serialize(#[from(random_target_set)] target_set: TargetSet) { + assert_eq!( + deserialize_into::(&serialize_value(cbor!(target_set))).unwrap(), + target_set.clone() + ); + } } From ff8668fbb5bb3c0fa13e33def12aa9530c2a13f0 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 20:00:28 +0100 Subject: [PATCH 047/126] Better naming in TargetSet fixture --- aquadoggo/src/test_utils/helpers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquadoggo/src/test_utils/helpers.rs b/aquadoggo/src/test_utils/helpers.rs index 3e4726ffe..2f87b3837 100644 --- a/aquadoggo/src/test_utils/helpers.rs +++ b/aquadoggo/src/test_utils/helpers.rs @@ -124,6 +124,6 @@ pub fn random_target_set() -> TargetSet { SchemaId::new_application(&SchemaName::new("messages").unwrap(), &document_view_id_1); let document_view_id_2 = random_document_view_id(); let schema_id_2 = - SchemaId::new_application(&SchemaName::new("messages").unwrap(), &document_view_id_2); + SchemaId::new_application(&SchemaName::new("events").unwrap(), &document_view_id_2); TargetSet::new(&[system_schema_id, schema_id_1, schema_id_2]) } From 897bb67e1e028c3b04d79ec51af108de35609b02 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 20:01:39 +0100 Subject: [PATCH 048/126] Update tests --- .../src/network/replication/behaviour.rs | 27 ++++++++++++----- aquadoggo/src/replication/manager.rs | 4 +-- aquadoggo/src/replication/strategies/diff.rs | 30 +++++++++++-------- 3 files changed, 38 insertions(+), 23 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index bdedaccc4..f98ac5d9e 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -128,8 +128,13 @@ mod tests { use futures::FutureExt; use libp2p::swarm::{keep_alive, Swarm}; use libp2p_swarm_test::SwarmExt; + use p2panda_rs::schema::SchemaId; + use rstest::rstest; - use crate::replication::{Message, SyncMessage, TargetSet}; + use crate::{ + replication::{Message, SyncMessage, TargetSet}, + test_utils::helpers::random_target_set, + }; use super::{Behaviour as ReplicationBehaviour, Event}; @@ -210,8 +215,14 @@ mod tests { assert!(result.is_err()) } + #[rstest] + #[case(TargetSet::new(&vec![SchemaId::SchemaFieldDefinition(0)]), TargetSet::new(&vec![SchemaId::SchemaDefinition(0)]))] + 
#[case(random_target_set(), random_target_set())]
     #[tokio::test]
-    async fn swarm_behaviour_events() {
+    async fn swarm_behaviour_events(
+        #[case] target_set_1: TargetSet,
+        #[case] target_set_2: TargetSet,
+    ) {
         // Create two swarms
         let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new());
         let mut swarm2 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new());
@@ -226,16 +237,16 @@
         let swarm1_peer_id = *swarm1.local_peer_id();
         let swarm2_peer_id = *swarm2.local_peer_id();

-        // Send a message from to swarm1 local peer from swarm2 local peer.
+        // Send a message from swarm1 to swarm2's local peer.
         swarm1.behaviour_mut().send_message(
             swarm2_peer_id,
-            SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))),
+            SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1.clone())),
         );

-        // Send a message from to swarm2 local peer from swarm1 local peer.
+        // Send a message from swarm2 to swarm1's local peer.
         swarm2.behaviour_mut().send_message(
             swarm1_peer_id,
-            SyncMessage::new(1, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))),
+            SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone())),
         );

         // Collect the next 2 behaviour events which occur in either swarm.
         for _ in 0..2 {
@@ -255,7 +266,7 @@
         assert_eq!(peer_id, &swarm2_peer_id);
         assert_eq!(
             message,
-            &SyncMessage::new(1, Message::SyncRequest(0.into(), TargetSet::new(&vec![])))
+            &SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone()))
         );

         // swarm2 should have received the message from swarm1 peer.
         let (peer_id, message) = &res2[0];
         assert_eq!(peer_id, &swarm1_peer_id);
         assert_eq!(
             message,
-            &SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![])))
+            &SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1))
         );
     }
 }
diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index d6801b133..1e18fa86c 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -530,9 +530,7 @@ mod tests {
                 .await;
             assert!(matches!(
                 result,
-                Err(ReplicationError::DuplicateSession(
-                    DuplicateSessionRequestError::Outbound(0)
-                ))
+                Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::OutboundExistingTargetSet(target_set_1)
             ));
         })
     }
diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs
index f9900ecc5..6b0047999 100644
--- a/aquadoggo/src/replication/strategies/diff.rs
+++ b/aquadoggo/src/replication/strategies/diff.rs
@@ -62,15 +62,15 @@ pub fn diff_log_heights(
     let mut remote_needs = Vec::new();

     for (local_author, local_author_logs) in local_log_heights {
-        let local_author_logs: HashMap<LogId, SeqNum> =
-            local_author_logs.to_owned().into_iter().collect();
-
         debug!(
             "Local log heights: {} {:?}",
             local_author.display(),
             local_author_logs
         );

+        let local_author_logs: HashMap<LogId, SeqNum> =
+            local_author_logs.to_owned().into_iter().collect();
+
         // Find all logs for a public key sent by the remote peer.
         //
         // If none is found we don't do anything as this means we are missing entries they should
@@ -95,6 +95,9 @@
             };
         }

+        // Sort the log heights.
+        remote_needs_logs.sort();
+
         // If the remote needs at least one log we push it to the remote needs.
         if !remote_needs_logs.is_empty() {
             remote_needs.push((local_author.to_owned(), remote_needs_logs));
         }
     } else {
         // The author we know about locally wasn't found on the remote log heights so they
         // need everything we have.
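                // (For example: if we hold logs 0 and 1 for an author the remote
                // has never seen, the remote needs both logs from
                // `SeqNum::default()`, i.e. starting at the very first entry,
                // which is exactly what the sorted list built below expresses.)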
- remote_needs.push(( - local_author.to_owned(), - local_author_logs - .iter() - .map(|(log_id, _)| (*log_id, SeqNum::default())) - .collect(), - )); + + let mut remote_needs_logs: Vec<(LogId, SeqNum)> = local_author_logs + .iter() + .map(|(log_id, _)| (*log_id, SeqNum::default())) + .collect(); + + // Sort the log heights. + remote_needs_logs.sort(); + + remote_needs.push((local_author.to_owned(), remote_needs_logs)); } } @@ -185,8 +191,8 @@ mod tests { vec![( author_a, vec![ - (LogId::new(1), SeqNum::new(1).unwrap()), - (LogId::new(0), SeqNum::new(1).unwrap()) + (LogId::new(0), SeqNum::new(1).unwrap()), + (LogId::new(1), SeqNum::new(1).unwrap()) ] ),] ); From 461e676922c53f33291e271038709c9b3f585ada Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 20:29:51 +0100 Subject: [PATCH 049/126] Order log heights in Have message --- aquadoggo/src/db/stores/entry.rs | 4 +++- aquadoggo/src/replication/strategies/naive.rs | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index 3a7a7d220..7b2d47412 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -336,7 +336,9 @@ impl SqlStore { WHERE logs.schema = $1 GROUP BY - entries.public_key, entries.log_id + entries.public_key, CAST(entries.log_id AS NUMERIC) + ORDER BY + entries.public_key, CAST(entries.log_id AS NUMERIC) ", ) .bind(schema_id.to_string()) diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index 45b5e8d71..299643713 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -34,7 +34,10 @@ impl NaiveStrategy { } } - async fn local_log_heights(&self, store: &SqlStore) -> HashMap> { + async fn local_log_heights( + &self, + store: &SqlStore, + ) -> HashMap> { let mut log_heights: HashMap> = HashMap::new(); for schema_id in self.target_set().iter() { @@ -49,6 +52,7 @@ impl NaiveStrategy { for (public_key, logs) in schema_logs { let mut author_logs = log_heights.get(&public_key).cloned().unwrap_or(vec![]); author_logs.extend(logs); + author_logs.sort(); log_heights.insert(public_key, author_logs); } } From 2f842f3dcc9e78afed5a39309d56d90145c6d98c Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 20:58:09 +0100 Subject: [PATCH 050/126] Implement Human on Message and SyncMessage --- aquadoggo/src/replication/message.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/aquadoggo/src/replication/message.rs b/aquadoggo/src/replication/message.rs index d310d677f..9e1512f76 100644 --- a/aquadoggo/src/replication/message.rs +++ b/aquadoggo/src/replication/message.rs @@ -6,6 +6,7 @@ use p2panda_rs::entry::EncodedEntry; use p2panda_rs::entry::{LogId, SeqNum}; use p2panda_rs::identity::PublicKey; use p2panda_rs::operation::EncodedOperation; +use p2panda_rs::Human; use serde::de::Visitor; use serde::ser::SerializeSeq; use serde::{Deserialize, Serialize}; @@ -42,6 +43,22 @@ impl Message { } } +impl Human for Message { + fn display(&self) -> String { + match &self { + Message::Have(log_heights) => { + let log_heights: Vec<(String, &Vec<(LogId, SeqNum)>)> = log_heights + .iter() + .map(|(public_key, log_heights)| (public_key.to_string(), log_heights)) + .collect(); + format!("Have({log_heights:?})") + } + message => format!("{message:?}"), + } + } +} + + #[derive(Debug, Clone, Eq, PartialEq)] pub struct SyncMessage(SessionId, Message); @@ -63,6 +80,12 @@ impl 
SyncMessage { } } +impl Human for SyncMessage { + fn display(&self) -> String { + format!("SyncMessage({:?}, {})", self.0, self.1.display()) + } +} + impl Serialize for SyncMessage { fn serialize(&self, serializer: S) -> Result where From 9a6b37d2945768adfc3a7dd07d231f8d5ce9aa34 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 20:58:19 +0100 Subject: [PATCH 051/126] Some work on logging --- aquadoggo/src/network/replication/behaviour.rs | 6 +++--- aquadoggo/src/network/replication/handler.rs | 2 ++ aquadoggo/src/replication/manager.rs | 3 ++- aquadoggo/src/replication/service.rs | 5 ++--- aquadoggo/src/replication/strategies/diff.rs | 2 +- 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index f98ac5d9e..80f151494 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -10,6 +10,7 @@ use libp2p::swarm::{ }; use libp2p::{Multiaddr, PeerId}; use log::{debug, trace}; +use p2panda_rs::Human; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; use crate::replication::SyncMessage; @@ -34,7 +35,7 @@ impl Behaviour { impl Behaviour { pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { - trace!("Notify handler of sent sync message: {peer_id} {message:?}"); + trace!("Notify handler of sent sync message: {peer_id} {}", message.display()); self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::Message(message), @@ -43,7 +44,7 @@ impl Behaviour { } fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { - trace!("Notify swarm of received sync message: {peer_id} {message:?}"); + trace!("Notify swarm of received sync message: {peer_id} {}", message.display()); self.events .push_back(ToSwarm::GenerateEvent(Event::MessageReceived( *peer_id, message, @@ -115,7 +116,6 @@ impl NetworkBehaviour for Behaviour { _params: &mut impl PollParameters, ) -> Poll>> { if let Some(event) = self.events.pop_front() { - trace!("Poll handler: {event:?}"); return Poll::Ready(event); } diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index 681ce230b..5427ea8bd 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -10,6 +10,7 @@ use libp2p::swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegot use libp2p::swarm::{ ConnectionHandler, ConnectionHandlerEvent, KeepAlive, NegotiatedSubstream, SubstreamProtocol, }; +use log::warn; use thiserror::Error; use crate::network::replication::{Codec, CodecError, Protocol}; @@ -231,6 +232,7 @@ impl ConnectionHandler for Handler { match Sink::poll_close(Pin::new(&mut substream), cx) { Poll::Ready(res) => { if res.is_err() { + warn!("{res:#?}") // Don't close the connection but just drop the inbound substream. // In case the remote has more to send, they will open up a new // substream. 
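                            // (The `warn!` above is deliberately the only
                            // behavioural addition in this hunk: it records why
                            // closing the substream failed while leaving the
                            // existing drop-and-reopen recovery path untouched.)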
diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 1e18fa86c..b6e80b19c 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -6,6 +6,7 @@ use anyhow::Result; use log::{debug, info, warn}; use p2panda_rs::entry::EncodedEntry; use p2panda_rs::operation::EncodedOperation; +use p2panda_rs::Human; use crate::db::SqlStore; use crate::replication::errors::{DuplicateSessionRequestError, ReplicationError}; @@ -380,7 +381,7 @@ where session_id: &SessionId, message: &Message, ) -> Result { - debug!("Message {message:?} received for session {session_id} with peer {remote_peer:?}"); + debug!("Message received: {session_id} {remote_peer:?} {}", message.display()); let sessions = self.sessions.get_mut(remote_peer); let (is_both_done, messages) = match sessions { diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 5a5b3c5f2..f6dbad47a 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -6,6 +6,7 @@ use anyhow::Result; use libp2p::PeerId; use log::{info, trace, warn, debug}; use p2panda_rs::schema::SchemaId; +use p2panda_rs::Human; use tokio::sync::broadcast::Receiver; use tokio::task; @@ -141,7 +142,7 @@ impl ConnectionManager { } async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { - trace!("Received SyncMessage: {:?}", message); + trace!("Received SyncMessage: {}", message.display()); match self.sync_manager.handle_message(&peer_id, &message).await { Ok(result) => { for message in result.messages { @@ -208,8 +209,6 @@ impl ConnectionManager { } fn send_service_message(&mut self, message: ServiceMessage) { - trace!("Sending message on service channel: {:?}", message); - if self.tx.send(message).is_err() { // Silently fail here as we don't care if the message was received at this // point diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index 6b0047999..86a81beb3 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -82,7 +82,7 @@ pub fn diff_log_heights( debug!( "Remote log heights: {} {:?}", local_author.display(), - remote_author_logs + remote_author_logs.clone().into_iter().collect::>().sort() ); let mut remote_needs_logs = vec![]; From 2194cd604fc762397cdbf0211c99d648763354fc Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 21:01:39 +0100 Subject: [PATCH 052/126] Fix remote log height logging --- aquadoggo/src/replication/strategies/diff.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index 86a81beb3..5ec280eae 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -79,11 +79,14 @@ pub fn diff_log_heights( let remote_author_logs: HashMap = remote_author_logs.to_owned().into_iter().collect(); - debug!( - "Remote log heights: {} {:?}", - local_author.display(), - remote_author_logs.clone().into_iter().collect::>().sort() - ); + debug!("Remote log heights: {} {:?}", local_author.display(), { + let mut logs = remote_author_logs + .clone() + .into_iter() + .collect::>(); + logs.sort(); + logs + }); let mut remote_needs_logs = vec![]; From 848dd5e52f5423ea6a8cd7c26aff03b1583f0b11 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 29 May 2023 21:02:31 +0100 Subject: [PATCH 053/126] fmt --- 
aquadoggo/src/network/replication/behaviour.rs | 10 ++++++++-- aquadoggo/src/replication/errors.rs | 8 ++++++-- aquadoggo/src/replication/ingest.rs | 2 +- aquadoggo/src/replication/manager.rs | 5 ++++- aquadoggo/src/replication/message.rs | 1 - aquadoggo/src/replication/service.rs | 2 +- 6 files changed, 20 insertions(+), 8 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 80f151494..b6de088ff 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -35,7 +35,10 @@ impl Behaviour { impl Behaviour { pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { - trace!("Notify handler of sent sync message: {peer_id} {}", message.display()); + trace!( + "Notify handler of sent sync message: {peer_id} {}", + message.display() + ); self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::Message(message), @@ -44,7 +47,10 @@ impl Behaviour { } fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { - trace!("Notify swarm of received sync message: {peer_id} {}", message.display()); + trace!( + "Notify swarm of received sync message: {peer_id} {}", + message.display() + ); self.events .push_back(ToSwarm::GenerateEvent(Event::MessageReceived( *peer_id, message, diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index d2e26c986..ded29e73b 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -61,10 +61,14 @@ pub enum DuplicateSessionRequestError { #[error("Tried to initialise duplicate inbound replication for completed session with id {0}")] InboundDoneSession(u64), - #[error("Tried to initialise duplicate inbound replication session for existing target set {0:?}")] + #[error( + "Tried to initialise duplicate inbound replication session for existing target set {0:?}" + )] InboundExistingTargetSet(TargetSet), - #[error("Tried to initialise duplicate outbound replication session for existing target set {0:?}")] + #[error( + "Tried to initialise duplicate outbound replication session for existing target set {0:?}" + )] OutboundExistingTargetSet(TargetSet), #[error("Tried to initialise duplicate outbound replication session with id {0}")] diff --git a/aquadoggo/src/replication/ingest.rs b/aquadoggo/src/replication/ingest.rs index da8d18d42..75951db0f 100644 --- a/aquadoggo/src/replication/ingest.rs +++ b/aquadoggo/src/replication/ingest.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use log::debug; -use p2panda_rs::Human; use p2panda_rs::api::validation::{ ensure_document_not_deleted, get_checked_document_id_for_view_id, get_expected_skiplink, is_next_seq_num, validate_claimed_schema_id, @@ -18,6 +17,7 @@ use p2panda_rs::operation::validate::validate_operation_with_entry; use p2panda_rs::operation::{EncodedOperation, Operation, OperationAction, OperationId}; use p2panda_rs::schema::Schema; use p2panda_rs::storage_provider::traits::{EntryStore, LogStore, OperationStore}; +use p2panda_rs::Human; use crate::bus::{ServiceMessage, ServiceSender}; use crate::db::SqlStore; diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index b6e80b19c..ee5dad5c5 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -381,7 +381,10 @@ where session_id: &SessionId, message: &Message, ) -> Result { - debug!("Message received: {session_id} {remote_peer:?} {}", 
message.display()); + debug!( + "Message received: {session_id} {remote_peer:?} {}", + message.display() + ); let sessions = self.sessions.get_mut(remote_peer); let (is_both_done, messages) = match sessions { diff --git a/aquadoggo/src/replication/message.rs b/aquadoggo/src/replication/message.rs index 9e1512f76..c88c9c23f 100644 --- a/aquadoggo/src/replication/message.rs +++ b/aquadoggo/src/replication/message.rs @@ -58,7 +58,6 @@ impl Human for Message { } } - #[derive(Debug, Clone, Eq, PartialEq)] pub struct SyncMessage(SessionId, Message); diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index f6dbad47a..3099f375a 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use anyhow::Result; use libp2p::PeerId; -use log::{info, trace, warn, debug}; +use log::{debug, info, trace, warn}; use p2panda_rs::schema::SchemaId; use p2panda_rs::Human; use tokio::sync::broadcast::Receiver; From c3d4c0e70acffed291a14994b4affd6369efd815 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Tue, 30 May 2023 08:46:46 +0100 Subject: [PATCH 054/126] Remove all sessions for a peer on replication error --- aquadoggo/src/replication/service.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 3099f375a..97efe84d4 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -188,7 +188,8 @@ impl ConnectionManager { } } - // @TODO: SyncManager should remove session internally on critical errors + // @TODO: Ideally we would know which session the error came from and only close that one. + self.sync_manager.remove_sessions(&peer_id); self.update_sessions().await; } From b93efd54716d1a19f01af02d33f74052c02c0359 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Tue, 30 May 2023 21:37:09 +0100 Subject: [PATCH 055/126] Add error logging to handler --- aquadoggo/src/network/replication/handler.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index 5427ea8bd..42dbe6255 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -210,7 +210,8 @@ impl ConnectionHandler for Handler { HandlerOutEvent::Message(message), )); } - Poll::Ready(Some(Err(_))) => { + Poll::Ready(Some(Err(err))) => { + warn!("{err:#?}"); // More serious errors, close this side of the stream. 
If the peer is // still around, they will re-establish their connection self.inbound_substream = From c632fd1e5126a2e855ab9409a1cc5feb5b59bfa1 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Tue, 30 May 2023 21:39:16 +0100 Subject: [PATCH 056/126] Add ConnectionId to peer identifier in replication service --- aquadoggo/src/bus.rs | 9 +- .../src/network/replication/behaviour.rs | 53 +++++-- aquadoggo/src/network/service.rs | 26 ++-- aquadoggo/src/replication/mod.rs | 2 +- aquadoggo/src/replication/service.rs | 136 ++++++++++++------ 5 files changed, 159 insertions(+), 67 deletions(-) diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index bf95a13ee..253cfd13d 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -1,6 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use libp2p::PeerId; +use libp2p::swarm::ConnectionId; use p2panda_rs::operation::OperationId; use crate::manager::Sender; @@ -16,14 +17,14 @@ pub enum ServiceMessage { NewOperation(OperationId), /// Node established a bi-directional connection to another node. - ConnectionEstablished(PeerId), + ConnectionEstablished(PeerId, ConnectionId), /// Node closed a connection to another node. - ConnectionClosed(PeerId), + ConnectionClosed(PeerId, ConnectionId), /// Node sent a message to remote node for replication. - SentReplicationMessage(PeerId, SyncMessage), + SentReplicationMessage(PeerId, ConnectionId, SyncMessage), /// Node received a message from remote node for replication. - ReceivedReplicationMessage(PeerId, SyncMessage), + ReceivedReplicationMessage(PeerId, ConnectionId, SyncMessage), } diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index b6de088ff..86c10b359 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -4,12 +4,13 @@ use std::collections::VecDeque; use std::task::{Context, Poll}; use libp2p::core::Endpoint; +use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, ConnectionClosed, }; use libp2p::{Multiaddr, PeerId}; -use log::{debug, trace}; +use log::{debug, trace, warn}; use p2panda_rs::Human; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; @@ -17,7 +18,12 @@ use crate::replication::SyncMessage; #[derive(Debug)] pub enum Event { - MessageReceived(PeerId, SyncMessage), + /// Replication message received on the inbound stream. 
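+    ///
+    /// The `ConnectionId` identifies which of the peer's possibly several
+    /// connections the message arrived on, so that the replication service can
+    /// keep sessions apart per connection.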
+ MessageReceived(PeerId, SyncMessage, ConnectionId), + + ConnectionEstablished(PeerId, ConnectionId), + + ConnectionClosed(PeerId, ConnectionId), } #[derive(Debug)] @@ -46,14 +52,21 @@ impl Behaviour { }); } - fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { + fn handle_received_message( + &mut self, + peer_id: &PeerId, + message: SyncMessage, + connection_id: ConnectionId, + ) { trace!( "Notify swarm of received sync message: {peer_id} {}", message.display() ); self.events .push_back(ToSwarm::GenerateEvent(Event::MessageReceived( - *peer_id, message, + *peer_id, + message, + connection_id, ))); } } @@ -88,22 +101,38 @@ impl NetworkBehaviour for Behaviour { fn on_connection_handler_event( &mut self, peer: PeerId, - _connection_id: ConnectionId, + connection_id: ConnectionId, handler_event: THandlerOutEvent, ) { debug!("Replication Behaviour: connection handler event"); match handler_event { HandlerOutEvent::Message(message) => { - self.handle_received_message(&peer, message); + self.handle_received_message(&peer, message, connection_id); } } } fn on_swarm_event(&mut self, event: FromSwarm) { match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) + FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id, + connection_id, + .. + }) => { + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( + peer_id, + connection_id, + ))); + } + FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, ..}) => { + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionClosed( + peer_id, + connection_id, + ))); + } + FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) @@ -258,8 +287,8 @@ mod tests { // Collect the next 2 behaviour events which occur in either swarms. for _ in 0..2 { tokio::select! { - Event::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, message)), - Event::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, message)), + Event::MessageReceived(peer_id, message, _) = swarm1.next_behaviour_event() => res1.push((peer_id, message)), + Event::MessageReceived(peer_id, message, _) = swarm2.next_behaviour_event() => res2.push((peer_id, message)), } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 39373f394..3ae9a64b7 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -159,7 +159,7 @@ impl EventLoop { /// Handle an incoming message via the communication bus from other services. 
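    /// Currently only `SentReplicationMessage` is acted upon here; it is
    /// forwarded to the replication network behaviour for the given peer.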
async fn handle_service_message(&mut self, message: ServiceMessage) { - if let ServiceMessage::SentReplicationMessage(peer_id, sync_message) = message { + if let ServiceMessage::SentReplicationMessage(peer_id, _connection_id, sync_message) = message { self.swarm .behaviour_mut() .replication @@ -202,9 +202,6 @@ impl EventLoop { } } } - - // Inform other services about new connection - self.send_service_message(ServiceMessage::ConnectionEstablished(peer_id)); } SwarmEvent::ConnectionClosed { peer_id, @@ -213,9 +210,6 @@ impl EventLoop { cause, } => { info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}"); - - // Inform other services about closed connection - self.send_service_message(ServiceMessage::ConnectionClosed(peer_id)); } SwarmEvent::ExpiredListenAddr { listener_id, @@ -429,9 +423,23 @@ impl EventLoop { // Replication // ~~~~~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Replication(event)) => match event { - replication::Event::MessageReceived(peer_id, message) => self.send_service_message( - ServiceMessage::ReceivedReplicationMessage(peer_id, message), + replication::Event::MessageReceived(peer_id, message, connection_id) => self.send_service_message( + ServiceMessage::ReceivedReplicationMessage(peer_id, connection_id, message), ), + replication::Event::ConnectionEstablished(peer_id, connection_id) => { + // Inform other services about new connection + self.send_service_message(ServiceMessage::ConnectionEstablished( + peer_id, + connection_id, + )); + } + replication::Event::ConnectionClosed(peer_id, connection_id) => { + // Inform other services about closed connection + self.send_service_message(ServiceMessage::ConnectionClosed( + peer_id, + connection_id, + )); + } }, // ~~~~~~~ diff --git a/aquadoggo/src/replication/mod.rs b/aquadoggo/src/replication/mod.rs index e3584fedc..2195f7b60 100644 --- a/aquadoggo/src/replication/mod.rs +++ b/aquadoggo/src/replication/mod.rs @@ -15,7 +15,7 @@ pub use ingest::SyncIngest; pub use manager::SyncManager; pub use message::{LiveMode, LogHeight, Message, SyncMessage}; pub use mode::Mode; -pub use service::replication_service; +pub use service::{replication_service, PeerConnectionId}; pub use session::{Session, SessionId, SessionState}; pub use strategies::{NaiveStrategy, SetReconciliationStrategy, StrategyResult}; pub use target_set::TargetSet; diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 97efe84d4..0b1318ade 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -1,8 +1,10 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use std::collections::HashMap; +use std::fmt::Display; use anyhow::Result; +use libp2p::swarm::ConnectionId; use libp2p::PeerId; use log::{debug, info, trace, warn}; use p2panda_rs::schema::SchemaId; @@ -18,7 +20,21 @@ use crate::replication::errors::ReplicationError; use crate::replication::{Mode, Session, SyncIngest, SyncManager, SyncMessage, TargetSet}; use crate::schema::SchemaProvider; -const MAX_SESSIONS_PER_PEER: usize = 3; +const MAX_SESSIONS_PER_CONNECTION: usize = 3; + +#[derive(PartialEq, Eq, PartialOrd, Clone, Debug, Hash)] +pub struct PeerConnectionId(PeerId, Option); + +impl Display for PeerConnectionId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}{:?}", + self.0, + self.1.unwrap_or(ConnectionId::new_unchecked(0)) + ) + } +} pub async fn replication_service( context: Context, @@ -88,8 +104,8 @@ impl PeerStatus { } struct ConnectionManager { - peers: 
HashMap, - sync_manager: SyncManager, + connections: HashMap, + sync_manager: SyncManager, tx: ServiceSender, rx: Receiver, target_set: TargetSet, @@ -104,10 +120,11 @@ impl ConnectionManager { target_set: TargetSet, ) -> Self { let ingest = SyncIngest::new(schema_provider.clone(), tx.clone()); - let sync_manager = SyncManager::new(store.clone(), ingest, local_peer_id); + let sync_manager = + SyncManager::new(store.clone(), ingest, PeerConnectionId(local_peer_id, None)); Self { - peers: HashMap::new(), + connections: HashMap::new(), sync_manager, tx: tx.clone(), rx: tx.subscribe(), @@ -115,11 +132,14 @@ impl ConnectionManager { } } - async fn on_connection_established(&mut self, peer_id: PeerId) { + async fn on_connection_established(&mut self, peer_id: PeerId, connection_id: ConnectionId) { info!("Connection established with peer: {}", peer_id); if self - .peers - .insert(peer_id, PeerStatus::new(&peer_id)) + .connections + .insert( + PeerConnectionId(peer_id, Some(connection_id)), + PeerStatus::new(&peer_id), + ) .is_some() { warn!("Duplicate established connection encountered"); @@ -128,43 +148,62 @@ impl ConnectionManager { self.update_sessions().await; } - async fn on_connection_closed(&mut self, peer_id: PeerId) { + async fn on_connection_closed(&mut self, peer_id: PeerId, connection_id: ConnectionId) { // Clear running replication sessions from sync manager info!("Connection closed: remove sessions with peer: {}", peer_id); - self.sync_manager.remove_sessions(&peer_id); + self.sync_manager + .remove_sessions(&PeerConnectionId(peer_id, Some(connection_id))); // Remove peer from our connections table - if self.peers.remove(&peer_id).is_none() { + if self + .connections + .remove(&PeerConnectionId(peer_id, Some(connection_id))) + .is_none() + { warn!("Tried to remove unknown connection"); } self.update_sessions().await; } - async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { + async fn on_replication_message( + &mut self, + peer_id: PeerId, + message: SyncMessage, + connection_id: ConnectionId, + ) { trace!("Received SyncMessage: {}", message.display()); - match self.sync_manager.handle_message(&peer_id, &message).await { + match self + .sync_manager + .handle_message(&PeerConnectionId(peer_id, Some(connection_id)), &message) + .await + { Ok(result) => { for message in result.messages { self.send_service_message(ServiceMessage::SentReplicationMessage( - peer_id, message, + peer_id, + connection_id, + message, )); } if result.is_done { - self.on_replication_finished(peer_id).await; + self.on_replication_finished(peer_id, connection_id).await; } } Err(err) => { - self.on_replication_error(peer_id, err).await; + self.on_replication_error(peer_id, connection_id, err).await; } } } - async fn on_replication_finished(&mut self, peer_id: PeerId) { + async fn on_replication_finished(&mut self, peer_id: PeerId, connection_id: ConnectionId) { info!("Finished replication with peer {}", peer_id); - match self.peers.get_mut(&peer_id) { + match self + .connections + .get_mut(&PeerConnectionId(peer_id, Some(connection_id))) + { Some(status) => { status.successful_count += 1; } @@ -176,10 +215,18 @@ impl ConnectionManager { self.update_sessions().await; } - async fn on_replication_error(&mut self, peer_id: PeerId, error: ReplicationError) { + async fn on_replication_error( + &mut self, + peer_id: PeerId, + connection_id: ConnectionId, + error: ReplicationError, + ) { info!("Replication with peer {} failed: {}", peer_id, error); - match self.peers.get_mut(&peer_id) { + 
match self + .connections + .get_mut(&PeerConnectionId(peer_id, Some(connection_id))) + { Some(status) => { status.failed_count += 1; } @@ -189,21 +236,23 @@ impl ConnectionManager { } // @TODO: Ideally we would know which session the error came from and only close that one. - self.sync_manager.remove_sessions(&peer_id); + self.sync_manager + .remove_sessions(&PeerConnectionId(peer_id, Some(connection_id))); self.update_sessions().await; } async fn handle_service_message(&mut self, message: ServiceMessage) { match message { - ServiceMessage::ConnectionEstablished(peer_id) => { - self.on_connection_established(peer_id).await; + ServiceMessage::ConnectionEstablished(peer_id, connection_id) => { + self.on_connection_established(peer_id, connection_id).await; } - ServiceMessage::ConnectionClosed(peer_id) => { - self.on_connection_closed(peer_id).await; + ServiceMessage::ConnectionClosed(peer_id, connection_id) => { + self.on_connection_closed(peer_id, connection_id).await; } - ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { - self.on_replication_message(peer_id, message).await; + ServiceMessage::ReceivedReplicationMessage(peer_id, connection_id, message) => { + self.on_replication_message(peer_id, message, connection_id) + .await; } _ => (), // Ignore all other messages } @@ -218,42 +267,47 @@ impl ConnectionManager { async fn update_sessions(&mut self) { // Iterate through all currently connected peers - let attempt_peers: Vec = self - .peers + let attempt_peers: Vec = self + .connections .iter() - .filter_map(|(peer_id, _peer_status)| { + .filter_map(|(peer_connection_id, peer_status)| { // Find out how many sessions we know about for each peer - let sessions = self.sync_manager.get_sessions(peer_id); + let sessions = self.sync_manager.get_sessions(&peer_connection_id); let active_sessions: Vec<&Session> = sessions .iter() .filter(|session| session.is_done()) .collect(); - // Check if we're running too many sessions with that peer already - if active_sessions.len() < MAX_SESSIONS_PER_PEER { - return Some(peer_id.to_owned()); + // Check if we're running too many sessions with that peer on this connection already + if active_sessions.len() < MAX_SESSIONS_PER_CONNECTION { + return Some(peer_connection_id.to_owned()); } - debug!("Max sessions reached for peer: {}", peer_id); + debug!( + "Max sessions reached for connection: {:?}", + peer_connection_id + ); None }) .collect(); - for peer_id in attempt_peers { - debug!("Initiate replication with: {}", peer_id); - self.initiate_replication(&peer_id).await; + for peer_connection_id in attempt_peers { + debug!("Initiate replication with: {:?}", peer_connection_id); + self.initiate_replication(&peer_connection_id).await; } } - async fn initiate_replication(&mut self, peer_id: &PeerId) { + async fn initiate_replication(&mut self, peer_connection_id: &PeerConnectionId) { match self .sync_manager - .initiate_session(peer_id, &self.target_set, &Mode::Naive) + .initiate_session(peer_connection_id, &self.target_set, &Mode::Naive) .await { Ok(messages) => { for message in messages { self.send_service_message(ServiceMessage::SentReplicationMessage( - *peer_id, message, + peer_connection_id.0, + peer_connection_id.1.expect("Remote peer found without a connection id"), + message, )); } } From 9364ccbe83c354afa6e0ab412ea8cf82edcb541a Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Tue, 30 May 2023 21:43:54 +0100 Subject: [PATCH 057/126] Doc string for PeerConnectionIdentifier --- aquadoggo/src/replication/service.rs | 6 ++++++ 1 file changed, 
6 insertions(+) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 0b1318ade..a43d0979b 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -23,6 +23,12 @@ use crate::schema::SchemaProvider; const MAX_SESSIONS_PER_CONNECTION: usize = 3; #[derive(PartialEq, Eq, PartialOrd, Clone, Debug, Hash)] + +/// Identifier for a connection to another peer. The `ConnectionId` is optional as we need to +/// identify the local peer in the replication manager, but in this case there is no single +/// connection to associate with. +/// +/// @TODO: This could be modelled better maybe... pub struct PeerConnectionId(PeerId, Option); impl Display for PeerConnectionId { From 286bad9e760c89989ce562e41d6db818d4be11a7 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Tue, 30 May 2023 21:44:40 +0100 Subject: [PATCH 058/126] Add comment to PeerConnectionId defaults --- aquadoggo/src/replication/service.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index a43d0979b..bf1b17bb6 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -37,6 +37,7 @@ impl Display for PeerConnectionId { f, "{}{:?}", self.0, + // If we don't pass a connection id then we default to `0` self.1.unwrap_or(ConnectionId::new_unchecked(0)) ) } From f87af83fdd13c7b4c179fa8924e5d2d5a9d6ffb9 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 00:36:07 +0100 Subject: [PATCH 059/126] Add (very) basic replication scheduler --- aquadoggo/src/bus.rs | 3 + .../src/network/replication/behaviour.rs | 51 +++++++---- aquadoggo/src/network/service.rs | 19 ++-- aquadoggo/src/replication/manager.rs | 2 +- aquadoggo/src/replication/service.rs | 87 ++++++++++++------- 5 files changed, 109 insertions(+), 53 deletions(-) diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index 253cfd13d..243ba7fa8 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -16,6 +16,9 @@ pub enum ServiceMessage { /// A new operation arrived at the node. NewOperation(OperationId), + /// Message from scheduler to initiate replication on available peers. + InitiateReplication, + /// Node established a bi-directional connection to another node. ConnectionEstablished(PeerId, ConnectionId), diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 86c10b359..5c89bcc5b 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -6,8 +6,8 @@ use std::task::{Context, Poll}; use libp2p::core::Endpoint; use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ - ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, ConnectionClosed, + ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, + PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; use log::{debug, trace, warn}; @@ -125,7 +125,11 @@ impl NetworkBehaviour for Behaviour { connection_id, ))); } - FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, ..}) => { + FromSwarm::ConnectionClosed(ConnectionClosed { + peer_id, + connection_id, + .. 
+ }) => {
 self.events
 .push_back(ToSwarm::GenerateEvent(Event::ConnectionClosed(
 peer_id,
@@ -287,29 +291,46 @@ mod tests {
 // Collect the next 2 behaviour events which occur in either swarm.
 for _ in 0..2 {
 tokio::select! {
- Event::MessageReceived(peer_id, message, _) = swarm1.next_behaviour_event() => res1.push((peer_id, message)),
- Event::MessageReceived(peer_id, message, _) = swarm2.next_behaviour_event() => res2.push((peer_id, message)),
+ Event::ConnectionEstablished(peer_id, _) = swarm1.next_behaviour_event() => res1.push((peer_id, None)),
+ Event::ConnectionEstablished(peer_id, _) = swarm2.next_behaviour_event() => res2.push((peer_id, None)),
+ }
+ }
+
+ // Then collect the next 2 message events which occur in either swarm.
+ for _ in 0..2 {
+ tokio::select! {
+ Event::MessageReceived(peer_id, message, _) = swarm1.next_behaviour_event() => res1.push((peer_id, Some(message))),
+ Event::MessageReceived(peer_id, message, _) = swarm2.next_behaviour_event() => res2.push((peer_id, Some(message))),
 }
 }

 // Each swarm should have emitted exactly two events.
- assert_eq!(res1.len(), 1);
- assert_eq!(res2.len(), 1);
+ assert_eq!(res1.len(), 2);
+ assert_eq!(res2.len(), 2);
+
+ // The first event should have been a ConnectionEstablished containing the expected peer id.
+ let (peer_id, message) = res1[0].clone();
+ assert_eq!(peer_id, swarm2_peer_id);
+ assert!(message.is_none());
+
+ let (peer_id, message) = res2[0].clone();
+ assert_eq!(peer_id, swarm1_peer_id);
+ assert!(message.is_none());

 // swarm1 should have received the message from swarm2 peer.
- let (peer_id, message) = &res1[0];
- assert_eq!(peer_id, &swarm2_peer_id);
+ let (peer_id, message) = res1[1].clone();
+ assert_eq!(peer_id, swarm2_peer_id);
 assert_eq!(
- message,
- &SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone()))
+ message.unwrap(),
+ SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone()))
 );

 // swarm2 should have received the message from swarm1 peer.
- let (peer_id, message) = &res2[0]; - assert_eq!(peer_id, &swarm1_peer_id); + let (peer_id, message) = res2[1].clone(); + assert_eq!(peer_id, swarm1_peer_id); assert_eq!( - message, - &SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1)) + message.unwrap(), + SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1)) ); } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 3ae9a64b7..26dfb2880 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -247,11 +247,13 @@ impl EventLoop { // ~~~~ SwarmEvent::Behaviour(BehaviourEvent::Mdns(event)) => match event { mdns::Event::Discovered(list) => { - for (peer, multiaddr) in list { - debug!("mDNS discovered a new peer: {peer}"); + for (peer_id, multiaddr) in list { + debug!("mDNS discovered a new peer: {peer_id}"); - if let Err(err) = self.swarm.dial(multiaddr) { - warn!("Failed to dial: {}", err); + if !self.swarm.is_connected(&peer_id) { + if let Err(err) = self.swarm.dial(multiaddr) { + warn!("Failed to dial: {}", err); + } } } } @@ -423,9 +425,12 @@ impl EventLoop { // Replication // ~~~~~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Replication(event)) => match event { - replication::Event::MessageReceived(peer_id, message, connection_id) => self.send_service_message( - ServiceMessage::ReceivedReplicationMessage(peer_id, connection_id, message), - ), + replication::Event::MessageReceived(peer_id, message, connection_id) => self + .send_service_message(ServiceMessage::ReceivedReplicationMessage( + peer_id, + connection_id, + message, + )), replication::Event::ConnectionEstablished(peer_id, connection_id) => { // Inform other services about new connection self.send_service_message(ServiceMessage::ConnectionEstablished( diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index ee5dad5c5..b787d07bb 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -119,7 +119,7 @@ where } } - fn remove_session(&mut self, remote_peer: &P, session_id: &SessionId) { + pub fn remove_session(&mut self, remote_peer: &P, session_id: &SessionId) { let sessions = self.sessions.get_mut(remote_peer); if let Some(sessions) = sessions { diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index bf1b17bb6..8c9651b38 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use std::fmt::Display; +use std::time::Duration; use anyhow::Result; use libp2p::swarm::ConnectionId; @@ -11,6 +12,7 @@ use p2panda_rs::schema::SchemaId; use p2panda_rs::Human; use tokio::sync::broadcast::Receiver; use tokio::task; +use tokio::time::sleep; use crate::bus::{ServiceMessage, ServiceSender}; use crate::context::Context; @@ -20,6 +22,8 @@ use crate::replication::errors::ReplicationError; use crate::replication::{Mode, Session, SyncIngest, SyncManager, SyncMessage, TargetSet}; use crate::schema::SchemaProvider; +use super::SessionId; + const MAX_SESSIONS_PER_CONNECTION: usize = 3; #[derive(PartialEq, Eq, PartialOrd, Clone, Debug, Hash)] @@ -27,7 +31,7 @@ const MAX_SESSIONS_PER_CONNECTION: usize = 3; /// Identifier for a connection to another peer. The `ConnectionId` is optional as we need to /// identify the local peer in the replication manager, but in this case there is no single /// connection to associate with. -/// +/// /// @TODO: This could be modelled better maybe... 
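///
/// A hedged construction sketch (`ConnectionId::new_unchecked` is the libp2p
/// constructor for tests already used elsewhere in this series; `peer_id` and
/// `local_peer_id` are assumed bindings):
///
/// ```
/// // Remote peer reached over one concrete connection:
/// let remote = PeerConnectionId(peer_id, Some(ConnectionId::new_unchecked(1)));
/// // The local peer itself, which has no single connection to attach:
/// let local = PeerConnectionId(local_peer_id, None);
/// ```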
pub struct PeerConnectionId(PeerId, Option); @@ -77,6 +81,7 @@ pub async fn replication_service( target_set, ); + let scheduler_handle = task::spawn(ConnectionManager::start_scheduler(tx.clone())); let handle = task::spawn(manager.run()); if tx_ready.send(()).is_err() { @@ -85,6 +90,7 @@ pub async fn replication_service( tokio::select! { _ = handle => (), + _ = scheduler_handle => (), _ = shutdown => { // @TODO: Wait until all pending replication processes are completed during graceful // shutdown @@ -158,19 +164,15 @@ impl ConnectionManager { async fn on_connection_closed(&mut self, peer_id: PeerId, connection_id: ConnectionId) { // Clear running replication sessions from sync manager info!("Connection closed: remove sessions with peer: {}", peer_id); - self.sync_manager - .remove_sessions(&PeerConnectionId(peer_id, Some(connection_id))); + + let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); + + self.sync_manager.remove_sessions(&peer_connection_id); // Remove peer from our connections table - if self - .connections - .remove(&PeerConnectionId(peer_id, Some(connection_id))) - .is_none() - { + if self.connections.remove(&peer_connection_id).is_none() { warn!("Tried to remove unknown connection"); } - - self.update_sessions().await; } async fn on_replication_message( @@ -180,6 +182,9 @@ impl ConnectionManager { connection_id: ConnectionId, ) { trace!("Received SyncMessage: {}", message.display()); + + let session_id = message.session_id(); + match self .sync_manager .handle_message(&PeerConnectionId(peer_id, Some(connection_id)), &message) @@ -195,22 +200,28 @@ impl ConnectionManager { } if result.is_done { - self.on_replication_finished(peer_id, connection_id).await; + self.on_replication_finished(peer_id, connection_id, session_id) + .await; } } Err(err) => { - self.on_replication_error(peer_id, connection_id, err).await; + self.on_replication_error(peer_id, connection_id, session_id, err) + .await; } } } - async fn on_replication_finished(&mut self, peer_id: PeerId, connection_id: ConnectionId) { + async fn on_replication_finished( + &mut self, + peer_id: PeerId, + connection_id: ConnectionId, + session_id: SessionId, + ) { info!("Finished replication with peer {}", peer_id); - match self - .connections - .get_mut(&PeerConnectionId(peer_id, Some(connection_id))) - { + let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); + + match self.connections.get_mut(&peer_connection_id) { Some(status) => { status.successful_count += 1; } @@ -218,22 +229,20 @@ impl ConnectionManager { panic!("Tried to access unknown peer"); } } - - self.update_sessions().await; } async fn on_replication_error( &mut self, peer_id: PeerId, connection_id: ConnectionId, + session_id: SessionId, error: ReplicationError, ) { info!("Replication with peer {} failed: {}", peer_id, error); - match self - .connections - .get_mut(&PeerConnectionId(peer_id, Some(connection_id))) - { + let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); + + match self.connections.get_mut(&peer_connection_id) { Some(status) => { status.failed_count += 1; } @@ -242,11 +251,13 @@ impl ConnectionManager { } } - // @TODO: Ideally we would know which session the error came from and only close that one. 
- self.sync_manager - .remove_sessions(&PeerConnectionId(peer_id, Some(connection_id))); - - self.update_sessions().await; + match error { + ReplicationError::StrategyFailed(_) | ReplicationError::Validation(_) => { + self.sync_manager + .remove_session(&peer_connection_id, &session_id); + } + _ => (), // Don't try and close the session on other errors as it should not have been initiated + } } async fn handle_service_message(&mut self, message: ServiceMessage) { @@ -261,11 +272,15 @@ impl ConnectionManager { self.on_replication_message(peer_id, message, connection_id) .await; } + ServiceMessage::InitiateReplication => { + info!("Initiate replication"); + self.update_sessions().await; + } _ => (), // Ignore all other messages } } - fn send_service_message(&mut self, message: ServiceMessage) { + fn send_service_message(&self, message: ServiceMessage) { if self.tx.send(message).is_err() { // Silently fail here as we don't care if the message was received at this // point @@ -313,7 +328,9 @@ impl ConnectionManager { for message in messages { self.send_service_message(ServiceMessage::SentReplicationMessage( peer_connection_id.0, - peer_connection_id.1.expect("Remote peer found without a connection id"), + peer_connection_id + .1 + .expect("Remote peer found without a connection id"), message, )); } @@ -334,4 +351,14 @@ impl ConnectionManager { } } } + + pub async fn start_scheduler(tx: ServiceSender) { + loop { + sleep(Duration::from_secs(5)).await; + if tx.send(ServiceMessage::InitiateReplication).is_err() { + // Silently fail here as we don't care if the message was received at this + // point + } + } + } } From e5bf152c200e64dc18fa654576b7654904cb2752 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 12:27:53 +0100 Subject: [PATCH 060/126] Refactor replication behaviour event triggering --- .../src/network/replication/behaviour.rs | 44 +++++++------------ aquadoggo/src/network/service.rs | 4 ++ 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 5c89bcc5b..d4d7e911e 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -78,23 +78,33 @@ impl NetworkBehaviour for Behaviour { fn handle_established_inbound_connection( &mut self, - _: ConnectionId, - _: PeerId, + connection_id: ConnectionId, + peer_id: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { debug!("Replication Behaviour: established inbound connection"); + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( + peer_id, + connection_id, + ))); Ok(Handler::new()) } fn handle_established_outbound_connection( &mut self, - _: ConnectionId, - _: PeerId, + connection_id: ConnectionId, + peer_id: PeerId, _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { debug!("Replication Behaviour: established outbound connection"); + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( + peer_id, + connection_id, + ))); Ok(Handler::new()) } @@ -114,29 +124,9 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, event: FromSwarm) { match event { - FromSwarm::ConnectionEstablished(ConnectionEstablished { - peer_id, - connection_id, - .. - }) => { - self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( - peer_id, - connection_id, - ))); - } - FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - .. 
- }) => { - self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionClosed( - peer_id, - connection_id, - ))); - } - FromSwarm::AddressChange(_) + FromSwarm::ConnectionEstablished(_) + | FromSwarm::ConnectionClosed(_) + | FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 26dfb2880..05c684d0b 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -250,6 +250,10 @@ impl EventLoop { for (peer_id, multiaddr) in list { debug!("mDNS discovered a new peer: {peer_id}"); + // Only dial the newly discovered peer if we're not already connected. + // + // @TODO: Is this even a thing? Trying to catch the case where two peers + // simultaneously discover and connect to each other. if !self.swarm.is_connected(&peer_id) { if let Err(err) = self.swarm.dial(multiaddr) { warn!("Failed to dial: {}", err); From b6d139b13a781e683d8d350cffb5f3a4188b3b1e Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 14:03:01 +0100 Subject: [PATCH 061/126] Temp fix for UNIQUE --- aquadoggo/src/db/stores/document.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs index afce47dc1..39dc6efd5 100644 --- a/aquadoggo/src/db/stores/document.rs +++ b/aquadoggo/src/db/stores/document.rs @@ -459,6 +459,7 @@ async fn insert_document_view( ) VALUES ($1, $2, $3) + ON CONFLICT(document_view_id) DO NOTHING -- @TODO: temp fix for double document view insertions: https://github.com/p2panda/aquadoggo/issues/398 ", ) .bind(document_view.id().to_string()) From f7c7cdaee17fae7ff1063ff09b26cfdc834cb869 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 15:24:43 +0100 Subject: [PATCH 062/126] Send SyncMessages to one handler by ConnectionId --- aquadoggo/src/network/replication/behaviour.rs | 9 +++++++-- aquadoggo/src/network/service.rs | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index d4d7e911e..fc0d03423 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -40,7 +40,12 @@ impl Behaviour { } impl Behaviour { - pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { + pub fn send_message( + &mut self, + peer_id: PeerId, + connection_id: ConnectionId, + message: SyncMessage, + ) { trace!( "Notify handler of sent sync message: {peer_id} {}", message.display() @@ -48,7 +53,7 @@ impl Behaviour { self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::Message(message), - handler: NotifyHandler::Any, + handler: NotifyHandler::One(connection_id), }); } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 05c684d0b..4266e5070 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -159,11 +159,11 @@ impl EventLoop { /// Handle an incoming message via the communication bus from other services. 
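///
/// Only `SentReplicationMessage` is of interest to the network service; a rough
/// sketch of the route such a message takes through the names used in this
/// patch series:
///
/// replication service -> bus -> network event loop -> replication behaviour
/// -> connection handler -> outbound stream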
async fn handle_service_message(&mut self, message: ServiceMessage) { - if let ServiceMessage::SentReplicationMessage(peer_id, _connection_id, sync_message) = message { + if let ServiceMessage::SentReplicationMessage(peer_id, connection_id, sync_message) = message { self.swarm .behaviour_mut() .replication - .send_message(peer_id, sync_message); + .send_message(peer_id, connection_id, sync_message); } } From afd24a559930979bef2d3bf1c40d8d336d39d472 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 17:34:25 +0100 Subject: [PATCH 063/126] Maintain list of peers and all their connections on ConnectionManager --- aquadoggo/src/replication/service.rs | 131 +++++++++++++++++++-------- 1 file changed, 91 insertions(+), 40 deletions(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 8c9651b38..5b63c8856 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -100,6 +100,7 @@ pub async fn replication_service( Ok(()) } +#[derive(Debug, Clone, PartialEq, Eq)] struct PeerStatus { peer_id: PeerId, successful_count: usize, @@ -116,8 +117,14 @@ impl PeerStatus { } } +#[derive(Debug, Clone, PartialEq, Eq)] +struct PeerConnections { + status: PeerStatus, + connections: Vec, +} + struct ConnectionManager { - connections: HashMap, + connections: HashMap, sync_manager: SyncManager, tx: ServiceSender, rx: Receiver, @@ -145,17 +152,57 @@ impl ConnectionManager { } } + fn remove_connection(&mut self, peer_id: PeerId, connection_id: ConnectionId) { + let peer = self.connections.get_mut(&peer_id); + + match peer { + Some(peer) => { + if peer + .connections + .iter() + .find(|id| *id != &connection_id) + .is_none() + { + warn!("Tried to remove unknown connection"); + }; + + peer.connections = peer + .connections + .iter() + .filter(|id| *id != &connection_id) + .map(ConnectionId::to_owned) + .collect(); + } + None => { + warn!("Tried to remove connection from unknown peer"); + } + } + } + async fn on_connection_established(&mut self, peer_id: PeerId, connection_id: ConnectionId) { info!("Connection established with peer: {}", peer_id); - if self - .connections - .insert( - PeerConnectionId(peer_id, Some(connection_id)), - PeerStatus::new(&peer_id), - ) - .is_some() - { - warn!("Duplicate established connection encountered"); + let peer = self.connections.get_mut(&peer_id); + + match peer { + Some(peer) => { + if peer + .connections + .iter() + .find(|id| *id == &connection_id) + .is_some() + { + warn!("Duplicate established connection encountered"); + } else { + peer.connections.push(connection_id) + } + } + None => { + let peer_connections = PeerConnections { + status: PeerStatus::new(&peer_id), + connections: vec![connection_id], + }; + self.connections.insert(peer_id, peer_connections); + } } self.update_sessions().await; @@ -169,10 +216,7 @@ impl ConnectionManager { self.sync_manager.remove_sessions(&peer_connection_id); - // Remove peer from our connections table - if self.connections.remove(&peer_connection_id).is_none() { - warn!("Tried to remove unknown connection"); - } + self.remove_connection(peer_id, connection_id) } async fn on_replication_message( @@ -218,17 +262,18 @@ impl ConnectionManager { session_id: SessionId, ) { info!("Finished replication with peer {}", peer_id); - - let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); - - match self.connections.get_mut(&peer_connection_id) { - Some(status) => { - status.successful_count += 1; + match self.connections.get_mut(&peer_id) { 
+ Some(peer) => { + peer.status.successful_count += 1; } None => { panic!("Tried to access unknown peer"); } } + + let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); + self.sync_manager + .remove_session(&peer_connection_id, &session_id); } async fn on_replication_error( @@ -240,11 +285,9 @@ impl ConnectionManager { ) { info!("Replication with peer {} failed: {}", peer_id, error); - let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); - - match self.connections.get_mut(&peer_connection_id) { - Some(status) => { - status.failed_count += 1; + match self.connections.get_mut(&peer_id) { + Some(peer) => { + peer.status.failed_count += 1; } None => { panic!("Tried to access unknown peer"); @@ -253,8 +296,11 @@ impl ConnectionManager { match error { ReplicationError::StrategyFailed(_) | ReplicationError::Validation(_) => { + let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); self.sync_manager .remove_session(&peer_connection_id, &session_id); + + self.remove_connection(peer_id, connection_id); } _ => (), // Don't try and close the session on other errors as it should not have been initiated } @@ -292,23 +338,28 @@ impl ConnectionManager { let attempt_peers: Vec = self .connections .iter() - .filter_map(|(peer_connection_id, peer_status)| { + .flat_map(|(peer_id, peer_connections)| { // Find out how many sessions we know about for each peer - let sessions = self.sync_manager.get_sessions(&peer_connection_id); - let active_sessions: Vec<&Session> = sessions - .iter() - .filter(|session| session.is_done()) - .collect(); - - // Check if we're running too many sessions with that peer on this connection already - if active_sessions.len() < MAX_SESSIONS_PER_CONNECTION { - return Some(peer_connection_id.to_owned()); + let mut connections = vec![]; + for connection_id in &peer_connections.connections { + let peer_connection_id = PeerConnectionId(*peer_id, Some(*connection_id)); + let sessions = self.sync_manager.get_sessions(&peer_connection_id); + let active_sessions: Vec<&Session> = sessions + .iter() + .filter(|session| session.is_done()) + .collect(); + + // Check if we're running too many sessions with that peer on this connection already + if active_sessions.len() < MAX_SESSIONS_PER_CONNECTION { + connections.push(peer_connection_id.to_owned()); + } else { + debug!( + "Max sessions reached for connection: {:?}", + peer_connection_id + ); + } } - debug!( - "Max sessions reached for connection: {:?}", - peer_connection_id - ); - None + connections }) .collect(); From 6183ca7141a5efef89d34acf36bedae414be6f99 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 17:34:48 +0100 Subject: [PATCH 064/126] Remove connection from ConnectionManager when swarm issues ConnectionClosed event --- .../src/network/replication/behaviour.rs | 17 +++++++++++++--- aquadoggo/src/network/service.rs | 20 +++++++++++-------- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index fc0d03423..015dc486e 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -6,8 +6,9 @@ use std::task::{Context, Poll}; use libp2p::core::Endpoint; use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ - ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + 
ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, + NetworkBehaviour, NotifyHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, + ToSwarm, }; use libp2p::{Multiaddr, PeerId}; use log::{debug, trace, warn}; @@ -129,8 +130,18 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, event: FromSwarm) { match event { + FromSwarm::ConnectionClosed(ConnectionClosed { + peer_id, + connection_id, + .. + }) => { + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionClosed( + peer_id, + connection_id, + ))); + } FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) | FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) | FromSwarm::ListenFailure(_) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 4266e5070..17afdff14 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -159,11 +159,14 @@ impl EventLoop { /// Handle an incoming message via the communication bus from other services. async fn handle_service_message(&mut self, message: ServiceMessage) { - if let ServiceMessage::SentReplicationMessage(peer_id, connection_id, sync_message) = message { - self.swarm - .behaviour_mut() - .replication - .send_message(peer_id, connection_id, sync_message); + if let ServiceMessage::SentReplicationMessage(peer_id, connection_id, sync_message) = + message + { + self.swarm.behaviour_mut().replication.send_message( + peer_id, + connection_id, + sync_message, + ); } } @@ -239,7 +242,8 @@ impl EventLoop { info!("Listening on {address}"); } SwarmEvent::OutgoingConnectionError { peer_id, error } => { - warn!("OutgoingConnectionError: {peer_id:?} {error:?}") + warn!("OutgoingConnectionError: {peer_id:?} {error:?}"); + // self.send_service_message(ServiceMessage::ConnectionError(peer_id)); } // ~~~~ @@ -251,9 +255,9 @@ impl EventLoop { debug!("mDNS discovered a new peer: {peer_id}"); // Only dial the newly discovered peer if we're not already connected. - // + // // @TODO: Is this even a thing? Trying to catch the case where two peers - // simultaneously discover and connect to each other. + // simultaneously discover and connect to each other. 
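// (If two peers do dial each other at once, each side can end up holding an
// inbound and an outbound connection to the same peer, and the replication
// behaviour then sees two `ConnectionEstablished` events for one logical
// peer; the `is_connected` guard below only prevents the second dial from
// this side.)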
if !self.swarm.is_connected(&peer_id) { if let Err(err) = self.swarm.dial(multiaddr) { warn!("Failed to dial: {}", err); From f3c3f6cc298e324126c63679c35b298f5b0d5959 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 17:45:51 +0100 Subject: [PATCH 065/126] Refactor ConnectionEstablished messaging in replication behaviour --- .../src/network/replication/behaviour.rs | 33 ++++++++++--------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 015dc486e..11dba390e 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -6,9 +6,8 @@ use std::task::{Context, Poll}; use libp2p::core::Endpoint; use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ - ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, - NetworkBehaviour, NotifyHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, - ToSwarm, + ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, + PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; use log::{debug, trace, warn}; @@ -90,11 +89,6 @@ impl NetworkBehaviour for Behaviour { _: &Multiaddr, ) -> Result, ConnectionDenied> { debug!("Replication Behaviour: established inbound connection"); - self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( - peer_id, - connection_id, - ))); Ok(Handler::new()) } @@ -106,11 +100,6 @@ impl NetworkBehaviour for Behaviour { _: Endpoint, ) -> Result, ConnectionDenied> { debug!("Replication Behaviour: established outbound connection"); - self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( - peer_id, - connection_id, - ))); Ok(Handler::new()) } @@ -141,8 +130,22 @@ impl NetworkBehaviour for Behaviour { connection_id, ))); } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::AddressChange(_) + FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id, + connection_id, + other_established, + .. 
+ }) => { + if other_established > 0 { + warn!("Multiple connections established to peer: {} {}", other_established + 1, peer_id); + } + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( + peer_id, + connection_id, + ))); + } + FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) From 86222a48b30b98d699ea45dc9a94db5c6c5cd5ca Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 19:13:29 +0100 Subject: [PATCH 066/126] Improve error handling and logging --- .../src/network/replication/behaviour.rs | 11 ++--- aquadoggo/src/network/service.rs | 2 + aquadoggo/src/replication/errors.rs | 7 ++- aquadoggo/src/replication/manager.rs | 46 +++++++++++-------- aquadoggo/src/replication/service.rs | 40 +++++++--------- aquadoggo/src/replication/strategies/diff.rs | 24 +++++----- 6 files changed, 67 insertions(+), 63 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 11dba390e..78bb8686e 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -83,23 +83,21 @@ impl NetworkBehaviour for Behaviour { fn handle_established_inbound_connection( &mut self, - connection_id: ConnectionId, - peer_id: PeerId, + _: ConnectionId, + _: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - debug!("Replication Behaviour: established inbound connection"); Ok(Handler::new()) } fn handle_established_outbound_connection( &mut self, - connection_id: ConnectionId, - peer_id: PeerId, + _: ConnectionId, + _: PeerId, _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - debug!("Replication Behaviour: established outbound connection"); Ok(Handler::new()) } @@ -109,7 +107,6 @@ impl NetworkBehaviour for Behaviour { connection_id: ConnectionId, handler_event: THandlerOutEvent, ) { - debug!("Replication Behaviour: connection handler event"); match handler_event { HandlerOutEvent::Message(message) => { self.handle_received_message(&peer, message, connection_id); diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 17afdff14..dfc563cce 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -262,6 +262,8 @@ impl EventLoop { if let Err(err) = self.swarm.dial(multiaddr) { warn!("Failed to dial: {}", err); } + } else { + warn!("Not dialing discovered peer as connection already exists: {peer_id:?}") } } } diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index ded29e73b..c55ae4e1c 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -12,8 +12,11 @@ pub enum ReplicationError { #[error("Duplicate session error: {0}")] DuplicateSession(#[from] DuplicateSessionRequestError), - #[error("No session found with id {0}")] - NoSessionFound(u64), + #[error("No session found with id {0} for peer {1}")] + NoSessionFound(u64, String), + + #[error("No sessions found for peer {0}")] + NoPeerFound(String), #[error("Received entry which is not in target set")] UnmatchedTargetSet, diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index b787d07bb..04affffb2 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use anyhow::Result; -use log::{debug, info, warn}; +use log::{debug, info, trace, warn}; use p2panda_rs::entry::EncodedEntry; 
use p2panda_rs::operation::EncodedOperation; use p2panda_rs::Human; @@ -68,6 +68,7 @@ where /// Warning: This might also remove actively running sessions. Do only clear sessions when you /// are sure they are a) done or b) the peer closed its connection. pub fn remove_sessions(&mut self, remote_peer: &P) { + debug!("Remove all sessions with peer: {remote_peer:?}"); self.sessions.remove(remote_peer); } @@ -128,8 +129,13 @@ where .enumerate() .find(|(_, session)| session.id == *session_id) { + debug!("Remove session {session_id} with peer: {remote_peer:?}"); sessions.remove(index); + } else { + warn!("Tried to remove nonexistent session {session_id} with peer: {remote_peer:?}") } + } else { + warn!("Tried to remove sessions from unknown peer: {remote_peer:?}") } } @@ -338,11 +344,6 @@ where let sessions = self.get_sessions(remote_peer); - info!( - "Initiate inbound replication session with peer {:?}", - remote_peer - ); - // Check if a session with this id already exists for this peer, this can happen if both // peers started to initiate a session at the same time, or if the remote peer sent two // sync request messages with the same session id. @@ -362,12 +363,17 @@ where .iter() .find(|session| session.target_set() == *target_set) { - debug!("Handle sync request containing duplicate session id"); + debug!("Handle sync request containing duplicate target sets"); return self .handle_duplicate_target_set(remote_peer, session_id, mode, session) .await; }; + info!( + "Accept inbound replication session with peer {:?}", + remote_peer + ); + let messages = self .insert_and_initialize_session(remote_peer, session_id, target_set, mode, false) .await; @@ -381,10 +387,11 @@ where session_id: &SessionId, message: &Message, ) -> Result { - debug!( + trace!( "Message received: {session_id} {remote_peer:?} {}", message.display() ); + let sessions = self.sessions.get_mut(remote_peer); let (is_both_done, messages) = match sessions { @@ -399,15 +406,17 @@ where let is_both_done = session.state == SessionState::Done; Ok((is_both_done, messages)) } else { - Err(ReplicationError::NoSessionFound(*session_id)) + Err(ReplicationError::NoSessionFound( + *session_id, + format!("{remote_peer:?}"), + )) } } - None => Err(ReplicationError::NoSessionFound(*session_id)), + None => Err(ReplicationError::NoPeerFound(format!("{remote_peer:?}"))), }?; // We're done, clean up after ourselves if is_both_done { - debug!("Both peers done, removing session: {session_id:?} {remote_peer:?}"); self.remove_session(remote_peer, session_id); } @@ -433,8 +442,7 @@ where { session.validate_entry(entry_bytes, operation_bytes.as_ref())?; - let result = self - .ingest + self.ingest .handle_entry( &self.store, entry_bytes, @@ -443,19 +451,17 @@ where .as_ref() .expect("For now we always expect an operation here"), ) - .await; - - result.map_err(|err| { - warn!("{:?}", err); - err - })?; + .await?; Ok(SyncResult { messages: vec![], is_done: session.state == SessionState::Done, }) } else { - Err(ReplicationError::NoSessionFound(*session_id)) + Err(ReplicationError::NoSessionFound( + *session_id, + format!("{remote_peer:?}"), + )) } } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 5b63c8856..a4b5451ca 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -160,18 +160,20 @@ impl ConnectionManager { if peer .connections .iter() - .find(|id| *id != &connection_id) + .find(|id| *id == &connection_id) .is_none() { - warn!("Tried to remove unknown connection"); 
- }; + debug!("Tried to remove unknown connection: {peer_id} {connection_id:?}"); + } else { + debug!("Remove connection: {peer_id} {connection_id:?}"); - peer.connections = peer - .connections - .iter() - .filter(|id| *id != &connection_id) - .map(ConnectionId::to_owned) - .collect(); + peer.connections = peer + .connections + .iter() + .filter(|id| *id != &connection_id) + .map(ConnectionId::to_owned) + .collect(); + }; } None => { warn!("Tried to remove connection from unknown peer"); @@ -213,9 +215,7 @@ impl ConnectionManager { info!("Connection closed: remove sessions with peer: {}", peer_id); let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); - self.sync_manager.remove_sessions(&peer_connection_id); - self.remove_connection(peer_id, connection_id) } @@ -225,8 +225,6 @@ impl ConnectionManager { message: SyncMessage, connection_id: ConnectionId, ) { - trace!("Received SyncMessage: {}", message.display()); - let session_id = message.session_id(); match self @@ -270,10 +268,6 @@ impl ConnectionManager { panic!("Tried to access unknown peer"); } } - - let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); - self.sync_manager - .remove_session(&peer_connection_id, &session_id); } async fn on_replication_error( @@ -283,7 +277,7 @@ impl ConnectionManager { session_id: SessionId, error: ReplicationError, ) { - info!("Replication with peer {} failed: {}", peer_id, error); + warn!("Replication with peer {} failed: {}", peer_id, error); match self.connections.get_mut(&peer_id) { Some(peer) => { @@ -299,8 +293,6 @@ impl ConnectionManager { let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); self.sync_manager .remove_session(&peer_connection_id, &session_id); - - self.remove_connection(peer_id, connection_id); } _ => (), // Don't try and close the session on other errors as it should not have been initiated } @@ -319,7 +311,6 @@ impl ConnectionManager { .await; } ServiceMessage::InitiateReplication => { - info!("Initiate replication"); self.update_sessions().await; } _ => (), // Ignore all other messages @@ -356,15 +347,18 @@ impl ConnectionManager { debug!( "Max sessions reached for connection: {:?}", peer_connection_id - ); + ); } } connections }) .collect(); + if attempt_peers.is_empty() { + info!("No peers available for replication") + } + for peer_connection_id in attempt_peers { - debug!("Initiate replication with: {:?}", peer_connection_id); self.initiate_replication(&peer_connection_id).await; } } diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index 5ec280eae..ea03a320c 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; -use log::debug; +use log::{debug, trace}; use p2panda_rs::{ entry::{LogId, SeqNum}, identity::PublicKey, @@ -16,14 +16,14 @@ fn remote_requires_entries( local_seq_num: &SeqNum, remote_log_heights: &HashMap, ) -> Option<(LogId, SeqNum)> { - debug!("Local log height: {:?} {:?}", log_id, local_seq_num); + trace!("Local log height: {:?} {:?}", log_id, local_seq_num); // Get height of the remote log by it's id. let remote_log_height = remote_log_heights.get(&log_id); match remote_log_height { // If a log exists then compare heights of local and remote logs. 
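// A worked example of the comparison below: if we hold (LogId 0, SeqNum 8)
// locally and the remote reported (LogId 0, SeqNum 5) for the same log, the
// remote still needs entries 6, 7 and 8, so we answer with the inclusive
// lower bound (LogId 0, SeqNum 6).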
Some(remote_seq_num) => { - debug!("Remote log height: {:?} {:?}", log_id, remote_seq_num); + trace!("Remote log height: {:?} {:?}", log_id, remote_seq_num); // If the local seq num is higher the remote needs all entries higher than // their max seq num for this log. @@ -35,21 +35,21 @@ fn remote_requires_entries( // will not reach max seq number. let from_seq_num = remote_seq_num.clone().next().unwrap(); - debug!( + trace!( "Remote needs entries from {:?} for {:?}", from_seq_num, log_id ); Some((log_id.to_owned(), from_seq_num)) } else { - debug!("Remote has all entries for {:?}", log_id); + trace!("Remote has all entries for {:?}", log_id); None } } // If no log exists then the remote has a log we don't know about yet and we // return nothing. None => { - debug!("{:?} not found on remote, all entries required", log_id); + trace!("{:?} not found on remote, all entries required", log_id); Some((log_id.to_owned(), SeqNum::default())) } } @@ -62,7 +62,7 @@ pub fn diff_log_heights( let mut remote_needs = Vec::new(); for (local_author, local_author_logs) in local_log_heights { - debug!( + trace!( "Local log heights: {} {:?}", local_author.display(), local_author_logs @@ -71,15 +71,14 @@ pub fn diff_log_heights( let local_author_logs: HashMap = local_author_logs.to_owned().into_iter().collect(); - // Find all logs for a public key sent by the remote peer. + // Find all logs sent by the remote for a public key we have locally. // - // If none is found we don't do anything as this means we are missing entries they should - // send us. + // If none is found we know they need everything we have by this author. if let Some(remote_author_logs) = remote_log_heights.get(&local_author) { let remote_author_logs: HashMap = remote_author_logs.to_owned().into_iter().collect(); - debug!("Remote log heights: {} {:?}", local_author.display(), { + trace!("Remote log heights: {} {:?}", local_author.display(), { let mut logs = remote_author_logs .clone() .into_iter() @@ -90,6 +89,8 @@ pub fn diff_log_heights( let mut remote_needs_logs = vec![]; + // For each log we diff the local and remote height and determine which entries, if + // any, we should send them. for (log_id, seq_num) in local_author_logs { if let Some(from_log_height) = remote_requires_entries(&log_id, &seq_num, &remote_author_logs) @@ -109,6 +110,7 @@ pub fn diff_log_heights( // The author we know about locally wasn't found on the remote log heights so they // need everything we have. + trace!("No logs found on remote for this author"); let mut remote_needs_logs: Vec<(LogId, SeqNum)> = local_author_logs .iter() .map(|(log_id, _)| (*log_id, SeqNum::default())) From cd7629feaa33e992a3c374e9c5547e621bf45db2 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 19:19:51 +0100 Subject: [PATCH 067/126] Update api in behaviour network tests --- aquadoggo/src/network/replication/behaviour.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 78bb8686e..665ecb803 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -134,7 +134,11 @@ impl NetworkBehaviour for Behaviour { .. 
}) => {
 if other_established > 0 {
- warn!("Multiple connections established to peer: {} {}", other_established + 1, peer_id);
+ warn!(
+ "Multiple connections established to peer: {} {}",
+ other_established + 1,
+ peer_id
+ );
 }
@@ -171,7 +175,7 @@ impl NetworkBehaviour for Behaviour {
 #[cfg(test)]
 mod tests {
 use futures::FutureExt;
- use libp2p::swarm::{keep_alive, Swarm};
+ use libp2p::swarm::{keep_alive, ConnectionId, Swarm};
 use libp2p_swarm_test::SwarmExt;
 use p2panda_rs::schema::SchemaId;
 use rstest::rstest;
@@ -247,6 +251,7 @@ mod tests {
 // Send a message from the swarm1 local peer to the swarm2 local peer.
 swarm1.behaviour_mut().send_message(
 swarm2_peer_id,
+ ConnectionId::new_unchecked(0),
 SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))),
 );

@@ -285,12 +290,14 @@ mod tests {
 // Send a message from swarm1 to peer2.
 swarm1.behaviour_mut().send_message(
 swarm2_peer_id,
+ ConnectionId::new_unchecked(0),
 SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1.clone())),
 );

 // Send a message from swarm2 to peer1.
 swarm2.behaviour_mut().send_message(
 swarm1_peer_id,
+ ConnectionId::new_unchecked(0),
 SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone())),
 );

From 5152ff6b319940a7c16325c7aba8dbbea9a23f00 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Wed, 31 May 2023 20:22:29 +0100
Subject: [PATCH 068/126] Error logging in replication connection handler

---
 aquadoggo/src/network/replication/handler.rs | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs
index 42dbe6255..b153cd5ca 100644
--- a/aquadoggo/src/network/replication/handler.rs
+++ b/aquadoggo/src/network/replication/handler.rs
@@ -152,7 +152,9 @@ impl ConnectionHandler for Handler {
 }
 ConnectionEvent::DialUpgradeError(_)
 | ConnectionEvent::AddressChange(_)
- | ConnectionEvent::ListenUpgradeError(_) => {}
+ | ConnectionEvent::ListenUpgradeError(_) => {
+ warn!("Connection event error");
+ }
 }
 }

@@ -293,6 +295,7 @@ impl ConnectionHandler for Handler {
 Some(OutboundSubstreamState::PendingFlush(substream))
 }
 Err(err) => {
+ warn!("{err:#?}");
 return Poll::Ready(ConnectionHandlerEvent::Close(
 HandlerError::Codec(err),
 ));
 }
 }
 }
 Poll::Ready(Err(err)) => {
+ warn!("{err:#?}");
 return Poll::Ready(ConnectionHandlerEvent::Close(
 HandlerError::Codec(err),
 ));
 }
@@ -318,9 +322,10 @@ impl ConnectionHandler for Handler {
 Some(OutboundSubstreamState::WaitingOutput(substream))
 }
 Poll::Ready(Err(err)) => {
+ warn!("{err:#?}");
 return Poll::Ready(ConnectionHandlerEvent::Close(HandlerError::Codec(
 err,
- )))
+ )));
 }
 Poll::Pending => {
 self.outbound_substream =

From d620053f54683aac0b3798f13ebae3237084a825 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Wed, 31 May 2023 21:58:08 +0100
Subject: [PATCH 069/126] Cargo clippy

---
 aquadoggo/src/network/replication/behaviour.rs | 2 +-
 aquadoggo/src/replication/manager.rs | 18 +++++++++---------
 aquadoggo/src/replication/service.rs | 16 ++++++----------
 aquadoggo/src/replication/strategies/diff.rs | 16 +++++++--------
 aquadoggo/src/replication/strategies/naive.rs | 6 ++----
 aquadoggo/src/replication/target_set.rs | 10 ++--------
 6 files changed, 27 insertions(+), 41 deletions(-)

diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs
index
665ecb803..55aee3dd9 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -10,7 +10,7 @@ use libp2p::swarm::{ PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; -use log::{debug, trace, warn}; +use log::{trace, warn}; use p2panda_rs::Human; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 04affffb2..b321b78be 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -318,9 +318,9 @@ where let messages = self .insert_and_initialize_session( remote_peer, - &session_id, + session_id, &existing_session.target_set(), - &mode, + mode, false, ) .await; @@ -510,8 +510,8 @@ mod tests { use super::{SyncManager, INITIAL_SESSION_ID}; - const PEER_ID_LOCAL: &'static str = "local"; - const PEER_ID_REMOTE: &'static str = "remote"; + const PEER_ID_LOCAL: &str = "local"; + const PEER_ID_REMOTE: &str = "remote"; #[rstest] fn initiate_outbound_session( @@ -912,7 +912,7 @@ mod tests { populate_and_materialize(&mut node_b, &config_b).await; let (tx, _rx) = broadcast::channel(8); - let target_set = TargetSet::new(&vec![config_a.schema.id().to_owned()]); + let target_set = TargetSet::new(&[config_a.schema.id().to_owned()]); let mut manager_a = SyncManager::new( node_a.context.store.clone(), @@ -947,7 +947,7 @@ mod tests { .await .unwrap(); - assert_eq!(result.is_done, false); + assert!(!result.is_done); assert_eq!( result.messages, vec![ @@ -962,13 +962,13 @@ mod tests { .handle_message(&PEER_ID_REMOTE, &result.messages[0]) .await .unwrap(); - assert_eq!(result_have.is_done, false); + assert!(!result_have.is_done); let result_done = manager_a .handle_message(&PEER_ID_REMOTE, &result.messages[1]) .await .unwrap(); - assert_eq!(result_done.is_done, true); + assert!(result_done.is_done); assert_eq!(result_have.messages.len(), 8); assert_eq!( @@ -983,7 +983,7 @@ mod tests { // Remote receives `Have`, `Entry` `SyncDone` messages from local for (index, message) in result_have.messages.iter().enumerate() { let result = manager_b - .handle_message(&PEER_ID_LOCAL, &message) + .handle_message(&PEER_ID_LOCAL, message) .await .unwrap(); diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index a4b5451ca..64d3d6a38 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -7,9 +7,8 @@ use std::time::Duration; use anyhow::Result; use libp2p::swarm::ConnectionId; use libp2p::PeerId; -use log::{debug, info, trace, warn}; +use log::{debug, info, warn}; use p2panda_rs::schema::SchemaId; -use p2panda_rs::Human; use tokio::sync::broadcast::Receiver; use tokio::task; use tokio::time::sleep; @@ -157,11 +156,9 @@ impl ConnectionManager { match peer { Some(peer) => { - if peer + if !peer .connections - .iter() - .find(|id| *id == &connection_id) - .is_none() + .iter().any(|id| id == &connection_id) { debug!("Tried to remove unknown connection: {peer_id} {connection_id:?}"); } else { @@ -190,8 +187,7 @@ impl ConnectionManager { if peer .connections .iter() - .find(|id| *id == &connection_id) - .is_some() + .any(|id| id == &connection_id) { warn!("Duplicate established connection encountered"); } else { @@ -256,8 +252,8 @@ impl ConnectionManager { async fn on_replication_finished( &mut self, peer_id: PeerId, - connection_id: ConnectionId, - session_id: SessionId, + _connection_id: 
ConnectionId, + _session_id: SessionId, ) { info!("Finished replication with peer {}", peer_id); match self.connections.get_mut(&peer_id) { diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index ea03a320c..cca7f419a 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; -use log::{debug, trace}; +use log::trace; use p2panda_rs::{ entry::{LogId, SeqNum}, identity::PublicKey, @@ -18,7 +18,7 @@ fn remote_requires_entries( ) -> Option<(LogId, SeqNum)> { trace!("Local log height: {:?} {:?}", log_id, local_seq_num); // Get height of the remote log by it's id. - let remote_log_height = remote_log_heights.get(&log_id); + let remote_log_height = remote_log_heights.get(log_id); match remote_log_height { // If a log exists then compare heights of local and remote logs. @@ -27,7 +27,7 @@ fn remote_requires_entries( // If the local seq num is higher the remote needs all entries higher than // their max seq num for this log. - if local_seq_num > &remote_seq_num { + if local_seq_num > remote_seq_num { // We increment the seq num as we want it to represent an inclusive lower // bound. // @@ -69,14 +69,14 @@ pub fn diff_log_heights( ); let local_author_logs: HashMap = - local_author_logs.to_owned().into_iter().collect(); + local_author_logs.iter().copied().collect(); // Find all logs sent by the remote for a public key we have locally. // // If none is found we know they need everything we have by this author. - if let Some(remote_author_logs) = remote_log_heights.get(&local_author) { + if let Some(remote_author_logs) = remote_log_heights.get(local_author) { let remote_author_logs: HashMap = - remote_author_logs.to_owned().into_iter().collect(); + remote_author_logs.iter().copied().collect(); trace!("Remote log heights: {} {:?}", local_author.display(), { let mut logs = remote_author_logs @@ -111,9 +111,7 @@ pub fn diff_log_heights( // need everything we have. trace!("No logs found on remote for this author"); - let mut remote_needs_logs: Vec<(LogId, SeqNum)> = local_author_logs - .iter() - .map(|(log_id, _)| (*log_id, SeqNum::default())) + let mut remote_needs_logs: Vec<(LogId, SeqNum)> = local_author_logs.keys().map(|log_id| (*log_id, SeqNum::default())) .collect(); // Sort the log heights. 
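To make the effect of `diff_log_heights` concrete, a minimal usage sketch (illustrative only: `author` is an assumed `PublicKey` binding and the heights are made up; the function and types are the ones used in the hunks above):

    use std::collections::HashMap;
    use p2panda_rs::entry::{LogId, SeqNum};

    // We are at seq num 8 in log 0, the remote reported seq num 5, so the
    // remote still needs entries 6, 7 and 8 and the diff answers with the
    // inclusive lower bound (LogId 0, SeqNum 6).
    let local = HashMap::from([(author.clone(), vec![(LogId::new(0), SeqNum::new(8).unwrap())])]);
    let remote = HashMap::from([(author.clone(), vec![(LogId::new(0), SeqNum::new(5).unwrap())])]);
    let remote_needs = diff_log_heights(&local, &remote);
    // Expected: [(author, [(LogId(0), SeqNum(6))])]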
diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/naive.rs index 299643713..638b86b5c 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/naive.rs @@ -4,12 +4,10 @@ use std::collections::HashMap; use anyhow::Result; use async_trait::async_trait; -use log::{debug, info}; +use log::debug; use p2panda_rs::entry::traits::AsEntry; use p2panda_rs::entry::{LogId, SeqNum}; use p2panda_rs::identity::PublicKey; -use p2panda_rs::schema::SchemaId; -use p2panda_rs::test_utils::fixtures::public_key; use p2panda_rs::Human; use crate::db::SqlStore; @@ -69,7 +67,7 @@ impl NaiveStrategy { let local_log_heights = self.local_log_heights(store).await; let remote_needs = diff_log_heights( &local_log_heights, - &remote_log_heights.to_owned().into_iter().collect(), + &remote_log_heights.iter().cloned().collect(), ); for (public_key, log_heights) in remote_needs { diff --git a/aquadoggo/src/replication/target_set.rs b/aquadoggo/src/replication/target_set.rs index c144af8f4..bdb5890e2 100644 --- a/aquadoggo/src/replication/target_set.rs +++ b/aquadoggo/src/replication/target_set.rs @@ -29,10 +29,7 @@ impl TargetSet { // And now sort system schema to the front of the set. deduplicated_set.sort_by(|schema_id_a, schema_id_b| { let is_system_schema = |schema_id: &SchemaId| -> bool { - match schema_id { - SchemaId::Application(_, _) => false, - _ => true, - } + !matches!(schema_id, SchemaId::Application(_, _)) }; is_system_schema(schema_id_b).cmp(&is_system_schema(schema_id_a)) }); @@ -78,10 +75,7 @@ impl Validate for TargetSet { // If the first schema id is an application schema then no system schema should be // included and we flip the `initial_system_schema` flag. if index == 0 { - initial_system_schema = match schema_id { - SchemaId::Application(_, _) => false, - _ => true, - } + initial_system_schema = !matches!(schema_id, SchemaId::Application(_, _)) } // Now validate the order. 
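As a compact reference for the ordering rules enforced above, a hedged sketch of a `TargetSet` that passes validation (schema ids borrowed from the tests in this series; parsing the string form via `SchemaId::new` is an assumption):

    // System schemas come first, application schemas afterwards, and each
    // group must be sorted and free of duplicates.
    let target_set = TargetSet::new(&[
        SchemaId::new("schema_definition_v1").unwrap(),
        SchemaId::new("schema_field_definition_v1").unwrap(),
        SchemaId::new("alpacas_00202dce4b32cd35d61cf54634b93a526df333c5ed3d93230c2f026f8d1ecabc0cd7").unwrap(),
        SchemaId::new("venues_0020c13cdc58dfc6f4ebd32992ff089db79980363144bdb2743693a019636fa72ec8").unwrap(),
    ]);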
From 91ceb9e5e0af3c931730127c964346799dcc64fd Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 21:58:32 +0100 Subject: [PATCH 070/126] fmt --- aquadoggo/src/bus.rs | 2 +- aquadoggo/src/network/replication/handler.rs | 6 +++--- aquadoggo/src/replication/service.rs | 11 ++--------- aquadoggo/src/replication/strategies/diff.rs | 12 +++++++----- 4 files changed, 13 insertions(+), 18 deletions(-) diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index 243ba7fa8..f516cbbcd 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -1,7 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use libp2p::PeerId; use libp2p::swarm::ConnectionId; +use libp2p::PeerId; use p2panda_rs::operation::OperationId; use crate::manager::Sender; diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index b153cd5ca..c6f3f26ba 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -323,9 +323,9 @@ impl ConnectionHandler for Handler { } Poll::Ready(Err(err)) => { warn!("{err:#?}"); - return Poll::Ready(ConnectionHandlerEvent::Close(HandlerError::Codec( - err, - ))); + return Poll::Ready(ConnectionHandlerEvent::Close( + HandlerError::Codec(err), + )); } Poll::Pending => { self.outbound_substream = diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 64d3d6a38..e8470e3c2 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -156,10 +156,7 @@ impl ConnectionManager { match peer { Some(peer) => { - if !peer - .connections - .iter().any(|id| id == &connection_id) - { + if !peer.connections.iter().any(|id| id == &connection_id) { debug!("Tried to remove unknown connection: {peer_id} {connection_id:?}"); } else { debug!("Remove connection: {peer_id} {connection_id:?}"); @@ -184,11 +181,7 @@ impl ConnectionManager { match peer { Some(peer) => { - if peer - .connections - .iter() - .any(|id| id == &connection_id) - { + if peer.connections.iter().any(|id| id == &connection_id) { warn!("Duplicate established connection encountered"); } else { peer.connections.push(connection_id) diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index cca7f419a..3a3f70b90 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -37,7 +37,8 @@ fn remote_requires_entries( trace!( "Remote needs entries from {:?} for {:?}", - from_seq_num, log_id + from_seq_num, + log_id ); Some((log_id.to_owned(), from_seq_num)) @@ -68,8 +69,7 @@ pub fn diff_log_heights( local_author_logs ); - let local_author_logs: HashMap = - local_author_logs.iter().copied().collect(); + let local_author_logs: HashMap = local_author_logs.iter().copied().collect(); // Find all logs sent by the remote for a public key we have locally. // @@ -90,7 +90,7 @@ pub fn diff_log_heights( let mut remote_needs_logs = vec![]; // For each log we diff the local and remote height and determine which entries, if - // any, we should send them. + // any, we should send them. for (log_id, seq_num) in local_author_logs { if let Some(from_log_height) = remote_requires_entries(&log_id, &seq_num, &remote_author_logs) @@ -111,7 +111,9 @@ pub fn diff_log_heights( // need everything we have. 
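// (In that case the lower bound for each of their logs is `SeqNum::default()`,
// i.e. the very first entry, since p2panda sequence numbers start at 1.)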
trace!("No logs found on remote for this author"); - let mut remote_needs_logs: Vec<(LogId, SeqNum)> = local_author_logs.keys().map(|log_id| (*log_id, SeqNum::default())) + let mut remote_needs_logs: Vec<(LogId, SeqNum)> = local_author_logs + .keys() + .map(|log_id| (*log_id, SeqNum::default())) .collect(); // Sort the log heights. From c5373eff837ff15c36542480fe418eaa63b8913d Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Wed, 31 May 2023 22:56:26 +0100 Subject: [PATCH 071/126] More tests for TargetSet validation --- aquadoggo/src/replication/target_set.rs | 26 ++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/aquadoggo/src/replication/target_set.rs b/aquadoggo/src/replication/target_set.rs index bdb5890e2..95ca9afcd 100644 --- a/aquadoggo/src/replication/target_set.rs +++ b/aquadoggo/src/replication/target_set.rs @@ -168,18 +168,30 @@ mod tests { } #[rstest] - fn deserialize_unsorted_target_set() { - let unsorted_schema_ids = [ - "venues_0020c13cdc58dfc6f4ebd32992ff089db79980363144bdb2743693a019636fa72ec8", - "alpacas_00202dce4b32cd35d61cf54634b93a526df333c5ed3d93230c2f026f8d1ecabc0cd7", - ]; - let result = deserialize_into::(&serialize_value(cbor!(unsorted_schema_ids))); + #[case(vec![ + "venues_0020c13cdc58dfc6f4ebd32992ff089db79980363144bdb2743693a019636fa72ec8".to_string(), + "alpacas_00202dce4b32cd35d61cf54634b93a526df333c5ed3d93230c2f026f8d1ecabc0cd7".to_string(), + ])] + #[case(vec![ + "alpacas_00202dce4b32cd35d61cf54634b93a526df333c5ed3d93230c2f026f8d1ecabc0cd7".to_string(), + "schema_field_definition_v1".to_string(), + ])] + #[case(vec![ + "schema_field_definition_v1".to_string(), + "schema_definition_v1".to_string(), + ])] + #[case(vec![ + "schema_definition_v1".to_string(), + "alpacas_00202dce4b32cd35d61cf54634b93a526df333c5ed3d93230c2f026f8d1ecabc0cd7".to_string(), + "schema_field_definition_v1".to_string(), + ])] + fn deserialize_unsorted_target_set(#[case] schema_ids: Vec) { + let result = deserialize_into::(&serialize_value(cbor!(schema_ids))); let expected_result = ciborium::de::Error::::Semantic( None, "Target set contains unsorted or duplicate schema ids".to_string(), ); - assert_eq!(result.unwrap_err().to_string(), expected_result.to_string()); } From 8510d15314e8a9bcd571df63b4bd509cf004f28c Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 2 Jun 2023 14:39:51 +0100 Subject: [PATCH 072/126] Only identify peers by their PeerId (not ConnectionId) in replication logic --- aquadoggo/src/bus.rs | 9 +- .../src/network/replication/behaviour.rs | 52 +++-- aquadoggo/src/network/service.rs | 36 ++-- aquadoggo/src/replication/mod.rs | 2 +- aquadoggo/src/replication/service.rs | 189 +++++------------- 5 files changed, 101 insertions(+), 187 deletions(-) diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index f516cbbcd..98a2bab86 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -1,6 +1,5 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use libp2p::swarm::ConnectionId; use libp2p::PeerId; use p2panda_rs::operation::OperationId; @@ -20,14 +19,14 @@ pub enum ServiceMessage { InitiateReplication, /// Node established a bi-directional connection to another node. - ConnectionEstablished(PeerId, ConnectionId), + ConnectionEstablished(PeerId), /// Node closed a connection to another node. - ConnectionClosed(PeerId, ConnectionId), + ConnectionClosed(PeerId), /// Node sent a message to remote node for replication. 
- SentReplicationMessage(PeerId, ConnectionId, SyncMessage), + SentReplicationMessage(PeerId, SyncMessage), /// Node received a message from remote node for replication. - ReceivedReplicationMessage(PeerId, ConnectionId, SyncMessage), + ReceivedReplicationMessage(PeerId, SyncMessage), } diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 55aee3dd9..399c12341 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -19,11 +19,11 @@ use crate::replication::SyncMessage; #[derive(Debug)] pub enum Event { /// Replication message received on the inbound stream. - MessageReceived(PeerId, SyncMessage, ConnectionId), + MessageReceived(PeerId, SyncMessage), - ConnectionEstablished(PeerId, ConnectionId), + ConnectionEstablished(PeerId), - ConnectionClosed(PeerId, ConnectionId), + ConnectionClosed(PeerId), } #[derive(Debug)] @@ -43,7 +43,6 @@ impl Behaviour { pub fn send_message( &mut self, peer_id: PeerId, - connection_id: ConnectionId, message: SyncMessage, ) { trace!( @@ -53,7 +52,7 @@ impl Behaviour { self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::Message(message), - handler: NotifyHandler::One(connection_id), + handler: NotifyHandler::Any, }); } @@ -61,7 +60,6 @@ impl Behaviour { &mut self, peer_id: &PeerId, message: SyncMessage, - connection_id: ConnectionId, ) { trace!( "Notify swarm of received sync message: {peer_id} {}", @@ -71,7 +69,6 @@ impl Behaviour { .push_back(ToSwarm::GenerateEvent(Event::MessageReceived( *peer_id, message, - connection_id, ))); } } @@ -104,12 +101,12 @@ impl NetworkBehaviour for Behaviour { fn on_connection_handler_event( &mut self, peer: PeerId, - connection_id: ConnectionId, + _connection_id: ConnectionId, handler_event: THandlerOutEvent, ) { match handler_event { HandlerOutEvent::Message(message) => { - self.handle_received_message(&peer, message, connection_id); + self.handle_received_message(&peer, message); } } } @@ -118,18 +115,18 @@ impl NetworkBehaviour for Behaviour { match event { FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, - connection_id, + remaining_established, .. }) => { - self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionClosed( - peer_id, - connection_id, - ))); + if remaining_established == 0 { + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionClosed( + peer_id, + ))); + } } FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, - connection_id, other_established, .. }) => { @@ -139,12 +136,12 @@ impl NetworkBehaviour for Behaviour { other_established + 1, peer_id ); + } else { + self.events + .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( + peer_id, + ))); } - self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( - peer_id, - connection_id, - ))); } FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) @@ -175,7 +172,7 @@ impl NetworkBehaviour for Behaviour { #[cfg(test)] mod tests { use futures::FutureExt; - use libp2p::swarm::{keep_alive, ConnectionId, Swarm}; + use libp2p::swarm::{keep_alive, Swarm}; use libp2p_swarm_test::SwarmExt; use p2panda_rs::schema::SchemaId; use rstest::rstest; @@ -251,7 +248,6 @@ mod tests { // Send a message from to swarm1 local peer from swarm2 local peer. 
swarm1.behaviour_mut().send_message( swarm2_peer_id, - ConnectionId::new_unchecked(0), SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))), ); @@ -290,30 +286,28 @@ mod tests { // Send a message from swarm1 to peer2. swarm1.behaviour_mut().send_message( swarm2_peer_id, - ConnectionId::new_unchecked(0), SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1.clone())), ); // Send a message from swarm2 peer1. swarm2.behaviour_mut().send_message( swarm1_peer_id, - ConnectionId::new_unchecked(0), SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone())), ); // Collect the next 2 behaviour events which occur in either swarms. for _ in 0..2 { tokio::select! { - Event::ConnectionEstablished(peer_id, _) = swarm1.next_behaviour_event() => res1.push((peer_id, None)), - Event::ConnectionEstablished(peer_id, _) = swarm2.next_behaviour_event() => res2.push((peer_id, None)), + Event::ConnectionEstablished(peer_id) = swarm1.next_behaviour_event() => res1.push((peer_id, None)), + Event::ConnectionEstablished(peer_id) = swarm2.next_behaviour_event() => res2.push((peer_id, None)), } } // And again add the next 2 behaviour events which occur in either swarms. for _ in 0..2 { tokio::select! { - Event::MessageReceived(peer_id, message, _) = swarm1.next_behaviour_event() => res1.push((peer_id, Some(message))), - Event::MessageReceived(peer_id, message, _) = swarm2.next_behaviour_event() => res2.push((peer_id, Some(message))), + Event::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, Some(message))), + Event::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, Some(message))), } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index dfc563cce..c3b4f5443 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -159,12 +159,11 @@ impl EventLoop { /// Handle an incoming message via the communication bus from other services. async fn handle_service_message(&mut self, message: ServiceMessage) { - if let ServiceMessage::SentReplicationMessage(peer_id, connection_id, sync_message) = + if let ServiceMessage::SentReplicationMessage(peer_id, sync_message) = message { self.swarm.behaviour_mut().replication.send_message( peer_id, - connection_id, sync_message, ); } @@ -254,17 +253,21 @@ impl EventLoop { for (peer_id, multiaddr) in list { debug!("mDNS discovered a new peer: {peer_id}"); - // Only dial the newly discovered peer if we're not already connected. - // - // @TODO: Is this even a thing? Trying to catch the case where two peers - // simultaneously discover and connect to each other. - if !self.swarm.is_connected(&peer_id) { - if let Err(err) = self.swarm.dial(multiaddr) { - warn!("Failed to dial: {}", err); - } - } else { - warn!("Not dialing discovered peer as connection already exists: {peer_id:?}") + if let Err(err) = self.swarm.dial(multiaddr) { + warn!("Failed to dial: {}", err); } + + // // Only dial the newly discovered peer if we're not already connected. + // // + // // @TODO: Is this even a thing? Trying to catch the case where two peers + // // simultaneously discover and connect to each other. 
+ // if !self.swarm.is_connected(&peer_id) { + // if let Err(err) = self.swarm.dial(multiaddr) { + // warn!("Failed to dial: {}", err); + // } + // } else { + // warn!("Not dialing discovered peer as connection already exists: {peer_id:?}") + // } } } mdns::Event::Expired(list) => { @@ -435,24 +438,21 @@ impl EventLoop { // Replication // ~~~~~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Replication(event)) => match event { - replication::Event::MessageReceived(peer_id, message, connection_id) => self + replication::Event::MessageReceived(peer_id, message) => self .send_service_message(ServiceMessage::ReceivedReplicationMessage( peer_id, - connection_id, message, )), - replication::Event::ConnectionEstablished(peer_id, connection_id) => { + replication::Event::ConnectionEstablished(peer_id) => { // Inform other services about new connection self.send_service_message(ServiceMessage::ConnectionEstablished( peer_id, - connection_id, )); } - replication::Event::ConnectionClosed(peer_id, connection_id) => { + replication::Event::ConnectionClosed(peer_id) => { // Inform other services about closed connection self.send_service_message(ServiceMessage::ConnectionClosed( peer_id, - connection_id, )); } }, diff --git a/aquadoggo/src/replication/mod.rs b/aquadoggo/src/replication/mod.rs index 2195f7b60..e3584fedc 100644 --- a/aquadoggo/src/replication/mod.rs +++ b/aquadoggo/src/replication/mod.rs @@ -15,7 +15,7 @@ pub use ingest::SyncIngest; pub use manager::SyncManager; pub use message::{LiveMode, LogHeight, Message, SyncMessage}; pub use mode::Mode; -pub use service::{replication_service, PeerConnectionId}; +pub use service::replication_service; pub use session::{Session, SessionId, SessionState}; pub use strategies::{NaiveStrategy, SetReconciliationStrategy, StrategyResult}; pub use target_set::TargetSet; diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index e8470e3c2..1c3b77ba9 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -1,11 +1,9 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use std::collections::HashMap; -use std::fmt::Display; use std::time::Duration; use anyhow::Result; -use libp2p::swarm::ConnectionId; use libp2p::PeerId; use log::{debug, info, warn}; use p2panda_rs::schema::SchemaId; @@ -25,27 +23,6 @@ use super::SessionId; const MAX_SESSIONS_PER_CONNECTION: usize = 3; -#[derive(PartialEq, Eq, PartialOrd, Clone, Debug, Hash)] - -/// Identifier for a connection to another peer. The `ConnectionId` is optional as we need to -/// identify the local peer in the replication manager, but in this case there is no single -/// connection to associate with. -/// -/// @TODO: This could be modelled better maybe... 
-pub struct PeerConnectionId(PeerId, Option); - -impl Display for PeerConnectionId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}{:?}", - self.0, - // If we don't pass a connection id then we default to `0` - self.1.unwrap_or(ConnectionId::new_unchecked(0)) - ) - } -} - pub async fn replication_service( context: Context, shutdown: Shutdown, @@ -116,15 +93,9 @@ impl PeerStatus { } } -#[derive(Debug, Clone, PartialEq, Eq)] -struct PeerConnections { - status: PeerStatus, - connections: Vec, -} - struct ConnectionManager { - connections: HashMap, - sync_manager: SyncManager, + peers: HashMap, + sync_manager: SyncManager, tx: ServiceSender, rx: Receiver, target_set: TargetSet, @@ -139,11 +110,10 @@ impl ConnectionManager { target_set: TargetSet, ) -> Self { let ingest = SyncIngest::new(schema_provider.clone(), tx.clone()); - let sync_manager = - SyncManager::new(store.clone(), ingest, PeerConnectionId(local_peer_id, None)); + let sync_manager = SyncManager::new(store.clone(), ingest, local_peer_id); Self { - connections: HashMap::new(), + peers: HashMap::new(), sync_manager, tx: tx.clone(), rx: tx.subscribe(), @@ -151,92 +121,57 @@ impl ConnectionManager { } } - fn remove_connection(&mut self, peer_id: PeerId, connection_id: ConnectionId) { - let peer = self.connections.get_mut(&peer_id); - - match peer { - Some(peer) => { - if !peer.connections.iter().any(|id| id == &connection_id) { - debug!("Tried to remove unknown connection: {peer_id} {connection_id:?}"); - } else { - debug!("Remove connection: {peer_id} {connection_id:?}"); - - peer.connections = peer - .connections - .iter() - .filter(|id| *id != &connection_id) - .map(ConnectionId::to_owned) - .collect(); - }; - } - None => { - warn!("Tried to remove connection from unknown peer"); - } + fn remove_connection(&mut self, peer_id: PeerId) { + match self.peers.remove(&peer_id) { + Some(_) => debug!("Remove peer: {peer_id}"), + None => warn!("Tried to remove connection from unknown peer"), } } - async fn on_connection_established(&mut self, peer_id: PeerId, connection_id: ConnectionId) { + async fn on_connection_established(&mut self, peer_id: PeerId) { info!("Connection established with peer: {}", peer_id); - let peer = self.connections.get_mut(&peer_id); - - match peer { - Some(peer) => { - if peer.connections.iter().any(|id| id == &connection_id) { - warn!("Duplicate established connection encountered"); - } else { - peer.connections.push(connection_id) - } + match self.peers.get(&peer_id) { + Some(_) => { + warn!("Peer already known: {peer_id}"); } None => { - let peer_connections = PeerConnections { - status: PeerStatus::new(&peer_id), - connections: vec![connection_id], - }; - self.connections.insert(peer_id, peer_connections); + self.peers.insert(peer_id, PeerStatus::new(&peer_id)); + self.update_sessions().await; } } - - self.update_sessions().await; } - async fn on_connection_closed(&mut self, peer_id: PeerId, connection_id: ConnectionId) { + async fn on_connection_closed(&mut self, peer_id: PeerId) { // Clear running replication sessions from sync manager info!("Connection closed: remove sessions with peer: {}", peer_id); - let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); - self.sync_manager.remove_sessions(&peer_connection_id); - self.remove_connection(peer_id, connection_id) + self.sync_manager.remove_sessions(&peer_id); + self.remove_connection(peer_id) } async fn on_replication_message( &mut self, peer_id: PeerId, message: SyncMessage, - connection_id: 
ConnectionId, ) { let session_id = message.session_id(); - match self - .sync_manager - .handle_message(&PeerConnectionId(peer_id, Some(connection_id)), &message) - .await - { + match self.sync_manager.handle_message(&peer_id, &message).await { Ok(result) => { for message in result.messages { self.send_service_message(ServiceMessage::SentReplicationMessage( peer_id, - connection_id, message, )); } if result.is_done { - self.on_replication_finished(peer_id, connection_id, session_id) + self.on_replication_finished(peer_id, session_id) .await; } } Err(err) => { - self.on_replication_error(peer_id, connection_id, session_id, err) + self.on_replication_error(peer_id, session_id, err) .await; } } @@ -245,13 +180,12 @@ impl ConnectionManager { async fn on_replication_finished( &mut self, peer_id: PeerId, - _connection_id: ConnectionId, _session_id: SessionId, ) { info!("Finished replication with peer {}", peer_id); - match self.connections.get_mut(&peer_id) { - Some(peer) => { - peer.status.successful_count += 1; + match self.peers.get_mut(&peer_id) { + Some(status) => { + status.successful_count += 1; } None => { panic!("Tried to access unknown peer"); @@ -262,15 +196,14 @@ impl ConnectionManager { async fn on_replication_error( &mut self, peer_id: PeerId, - connection_id: ConnectionId, session_id: SessionId, error: ReplicationError, ) { warn!("Replication with peer {} failed: {}", peer_id, error); - match self.connections.get_mut(&peer_id) { - Some(peer) => { - peer.status.failed_count += 1; + match self.peers.get_mut(&peer_id) { + Some(status) => { + status.failed_count += 1; } None => { panic!("Tried to access unknown peer"); @@ -279,9 +212,7 @@ impl ConnectionManager { match error { ReplicationError::StrategyFailed(_) | ReplicationError::Validation(_) => { - let peer_connection_id = PeerConnectionId(peer_id, Some(connection_id)); - self.sync_manager - .remove_session(&peer_connection_id, &session_id); + self.sync_manager.remove_session(&peer_id, &session_id); } _ => (), // Don't try and close the session on other errors as it should not have been initiated } @@ -289,14 +220,14 @@ impl ConnectionManager { async fn handle_service_message(&mut self, message: ServiceMessage) { match message { - ServiceMessage::ConnectionEstablished(peer_id, connection_id) => { - self.on_connection_established(peer_id, connection_id).await; + ServiceMessage::ConnectionEstablished(peer_id) => { + self.on_connection_established(peer_id).await; } - ServiceMessage::ConnectionClosed(peer_id, connection_id) => { - self.on_connection_closed(peer_id, connection_id).await; + ServiceMessage::ConnectionClosed(peer_id) => { + self.on_connection_closed(peer_id).await; } - ServiceMessage::ReceivedReplicationMessage(peer_id, connection_id, message) => { - self.on_replication_message(peer_id, message, connection_id) + ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { + self.on_replication_message(peer_id, message) .await; } ServiceMessage::InitiateReplication => { @@ -315,31 +246,24 @@ impl ConnectionManager { async fn update_sessions(&mut self) { // Iterate through all currently connected peers - let attempt_peers: Vec = self - .connections - .iter() - .flat_map(|(peer_id, peer_connections)| { - // Find out how many sessions we know about for each peer - let mut connections = vec![]; - for connection_id in &peer_connections.connections { - let peer_connection_id = PeerConnectionId(*peer_id, Some(*connection_id)); - let sessions = self.sync_manager.get_sessions(&peer_connection_id); - let active_sessions: 
Vec<&Session> = sessions - .iter() - .filter(|session| session.is_done()) - .collect(); - - // Check if we're running too many sessions with that peer on this connection already - if active_sessions.len() < MAX_SESSIONS_PER_CONNECTION { - connections.push(peer_connection_id.to_owned()); - } else { - debug!( - "Max sessions reached for connection: {:?}", - peer_connection_id - ); - } + let attempt_peers: Vec = self + .peers + .clone() + .into_iter() + .filter_map(|(peer_id, _)| { + let sessions = self.sync_manager.get_sessions(&peer_id); + let active_sessions: Vec<&Session> = sessions + .iter() + .filter(|session| session.is_done()) + .collect(); + + // Check if we're running too many sessions with that peer on this connection already + if active_sessions.len() < MAX_SESSIONS_PER_CONNECTION { + Some(peer_id) + } else { + debug!("Max sessions reached for peer: {:?}", peer_id); + None } - connections }) .collect(); @@ -347,24 +271,21 @@ impl ConnectionManager { info!("No peers available for replication") } - for peer_connection_id in attempt_peers { - self.initiate_replication(&peer_connection_id).await; + for peer_id in attempt_peers { + self.initiate_replication(&peer_id).await; } } - async fn initiate_replication(&mut self, peer_connection_id: &PeerConnectionId) { + async fn initiate_replication(&mut self, peer_id: &PeerId) { match self .sync_manager - .initiate_session(peer_connection_id, &self.target_set, &Mode::Naive) + .initiate_session(peer_id, &self.target_set, &Mode::Naive) .await { Ok(messages) => { for message in messages { self.send_service_message(ServiceMessage::SentReplicationMessage( - peer_connection_id.0, - peer_connection_id - .1 - .expect("Remote peer found without a connection id"), + peer_id.clone(), message, )); } From 8512680e93699ab0a9483d0a676194481d856946 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 2 Jun 2023 15:02:44 +0100 Subject: [PATCH 073/126] Rename ConnectionEstablished to PeerConnected etc.. --- aquadoggo/src/bus.rs | 4 ++-- aquadoggo/src/network/replication/behaviour.rs | 12 ++++++------ aquadoggo/src/network/service.rs | 8 ++++---- aquadoggo/src/replication/service.rs | 11 ++++++----- 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index 98a2bab86..1b621ffc6 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -19,10 +19,10 @@ pub enum ServiceMessage { InitiateReplication, /// Node established a bi-directional connection to another node. - ConnectionEstablished(PeerId), + PeerConnected(PeerId), /// Node closed a connection to another node. - ConnectionClosed(PeerId), + PeerDisconnected(PeerId), /// Node sent a message to remote node for replication. SentReplicationMessage(PeerId, SyncMessage), diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 399c12341..dcc6011ad 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -21,9 +21,9 @@ pub enum Event { /// Replication message received on the inbound stream. 
MessageReceived(PeerId, SyncMessage), - ConnectionEstablished(PeerId), + PeerConnected(PeerId), - ConnectionClosed(PeerId), + PeerDisconnected(PeerId), } #[derive(Debug)] @@ -120,7 +120,7 @@ impl NetworkBehaviour for Behaviour { }) => { if remaining_established == 0 { self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionClosed( + .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected( peer_id, ))); } @@ -138,7 +138,7 @@ impl NetworkBehaviour for Behaviour { ); } else { self.events - .push_back(ToSwarm::GenerateEvent(Event::ConnectionEstablished( + .push_back(ToSwarm::GenerateEvent(Event::PeerConnected( peer_id, ))); } @@ -298,8 +298,8 @@ mod tests { // Collect the next 2 behaviour events which occur in either swarms. for _ in 0..2 { tokio::select! { - Event::ConnectionEstablished(peer_id) = swarm1.next_behaviour_event() => res1.push((peer_id, None)), - Event::ConnectionEstablished(peer_id) = swarm2.next_behaviour_event() => res2.push((peer_id, None)), + Event::PeerConnected(peer_id) = swarm1.next_behaviour_event() => res1.push((peer_id, None)), + Event::PeerConnected(peer_id) = swarm2.next_behaviour_event() => res2.push((peer_id, None)), } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index c3b4f5443..885259dce 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -443,15 +443,15 @@ impl EventLoop { peer_id, message, )), - replication::Event::ConnectionEstablished(peer_id) => { + replication::Event::PeerConnected(peer_id) => { // Inform other services about new connection - self.send_service_message(ServiceMessage::ConnectionEstablished( + self.send_service_message(ServiceMessage::PeerConnected( peer_id, )); } - replication::Event::ConnectionClosed(peer_id) => { + replication::Event::PeerDisconnected(peer_id) => { // Inform other services about closed connection - self.send_service_message(ServiceMessage::ConnectionClosed( + self.send_service_message(ServiceMessage::PeerDisconnected( peer_id, )); } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 1c3b77ba9..4bae23c97 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -129,7 +129,8 @@ impl ConnectionManager { } async fn on_connection_established(&mut self, peer_id: PeerId) { - info!("Connection established with peer: {}", peer_id); + info!("Connected to peer: {peer_id}"); + match self.peers.get(&peer_id) { Some(_) => { warn!("Peer already known: {peer_id}"); @@ -142,9 +143,9 @@ impl ConnectionManager { } async fn on_connection_closed(&mut self, peer_id: PeerId) { - // Clear running replication sessions from sync manager - info!("Connection closed: remove sessions with peer: {}", peer_id); + info!("Disconnected from peer: {peer_id}"); + // Clear running replication sessions from sync manager self.sync_manager.remove_sessions(&peer_id); self.remove_connection(peer_id) } @@ -220,10 +221,10 @@ impl ConnectionManager { async fn handle_service_message(&mut self, message: ServiceMessage) { match message { - ServiceMessage::ConnectionEstablished(peer_id) => { + ServiceMessage::PeerConnected(peer_id) => { self.on_connection_established(peer_id).await; } - ServiceMessage::ConnectionClosed(peer_id) => { + ServiceMessage::PeerDisconnected(peer_id) => { self.on_connection_closed(peer_id).await; } ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { From 8dad2892566868764d0c14069002e17eb44a7520 Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 11:44:35 +0200 
Subject: [PATCH 074/126] Poll ticking stream for scheduling replication --- Cargo.lock | 12 +++++ aquadoggo/Cargo.toml | 1 + aquadoggo/src/bus.rs | 3 -- aquadoggo/src/network/service.rs | 29 ++++------ aquadoggo/src/replication/service.rs | 79 ++++++++++++---------------- 5 files changed, 56 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec634c091..81f11f228 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -180,6 +180,7 @@ dependencies = [ "env_logger", "envy", "futures", + "futures-ticker", "hex", "http", "hyper", @@ -1788,6 +1789,17 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +[[package]] +name = "futures-ticker" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" +dependencies = [ + "futures", + "futures-timer", + "instant", +] + [[package]] name = "futures-timer" version = "3.0.2" diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index 0745042cd..025990dcd 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -31,6 +31,7 @@ directories = "4.0.1" dynamic-graphql = "0.7.3" envy = "0.4.2" futures = "0.3.23" +futures-ticker = "0.0.3" hex = "0.4.3" http = "0.2.9" libp2p = { version = "0.51.3", features = [ diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs index 1b621ffc6..7b4bf6863 100644 --- a/aquadoggo/src/bus.rs +++ b/aquadoggo/src/bus.rs @@ -15,9 +15,6 @@ pub enum ServiceMessage { /// A new operation arrived at the node. NewOperation(OperationId), - /// Message from scheduler to initiate replication on available peers. - InitiateReplication, - /// Node established a bi-directional connection to another node. PeerConnected(PeerId), diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 885259dce..89bc8a7fe 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -141,7 +141,6 @@ impl EventLoop { } // Command channel closed, thus shutting down the network event loop None => { - warn!("CLOSED"); return }, }, @@ -159,13 +158,11 @@ impl EventLoop { /// Handle an incoming message via the communication bus from other services. 
async fn handle_service_message(&mut self, message: ServiceMessage) { - if let ServiceMessage::SentReplicationMessage(peer_id, sync_message) = - message - { - self.swarm.behaviour_mut().replication.send_message( - peer_id, - sync_message, - ); + if let ServiceMessage::SentReplicationMessage(peer_id, sync_message) = message { + self.swarm + .behaviour_mut() + .replication + .send_message(peer_id, sync_message); } } @@ -438,22 +435,16 @@ impl EventLoop { // Replication // ~~~~~~~~~~~ SwarmEvent::Behaviour(BehaviourEvent::Replication(event)) => match event { - replication::Event::MessageReceived(peer_id, message) => self - .send_service_message(ServiceMessage::ReceivedReplicationMessage( - peer_id, - message, - )), + replication::Event::MessageReceived(peer_id, message) => self.send_service_message( + ServiceMessage::ReceivedReplicationMessage(peer_id, message), + ), replication::Event::PeerConnected(peer_id) => { // Inform other services about new connection - self.send_service_message(ServiceMessage::PeerConnected( - peer_id, - )); + self.send_service_message(ServiceMessage::PeerConnected(peer_id)); } replication::Event::PeerDisconnected(peer_id) => { // Inform other services about closed connection - self.send_service_message(ServiceMessage::PeerDisconnected( - peer_id, - )); + self.send_service_message(ServiceMessage::PeerDisconnected(peer_id)); } }, diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 4bae23c97..9d55de152 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -4,24 +4,26 @@ use std::collections::HashMap; use std::time::Duration; use anyhow::Result; +use futures_ticker::Ticker; use libp2p::PeerId; use log::{debug, info, warn}; use p2panda_rs::schema::SchemaId; -use tokio::sync::broadcast::Receiver; use tokio::task; -use tokio::time::sleep; +use tokio_stream::wrappers::BroadcastStream; +use tokio_stream::StreamExt; use crate::bus::{ServiceMessage, ServiceSender}; use crate::context::Context; use crate::db::SqlStore; use crate::manager::{ServiceReadySender, Shutdown}; use crate::replication::errors::ReplicationError; -use crate::replication::{Mode, Session, SyncIngest, SyncManager, SyncMessage, TargetSet}; +use crate::replication::{ + Mode, Session, SessionId, SyncIngest, SyncManager, SyncMessage, TargetSet, +}; use crate::schema::SchemaProvider; -use super::SessionId; - const MAX_SESSIONS_PER_CONNECTION: usize = 3; +const UPDATE_INTERVAL: Duration = Duration::from_secs(5); pub async fn replication_service( context: Context, @@ -57,7 +59,6 @@ pub async fn replication_service( target_set, ); - let scheduler_handle = task::spawn(ConnectionManager::start_scheduler(tx.clone())); let handle = task::spawn(manager.run()); if tx_ready.send(()).is_err() { @@ -66,7 +67,6 @@ pub async fn replication_service( tokio::select! 
{ _ = handle => (), - _ = scheduler_handle => (), _ = shutdown => { // @TODO: Wait until all pending replication processes are completed during graceful // shutdown @@ -96,8 +96,9 @@ impl PeerStatus { struct ConnectionManager { peers: HashMap, sync_manager: SyncManager, + scheduler: Ticker, tx: ServiceSender, - rx: Receiver, + rx: BroadcastStream, target_set: TargetSet, } @@ -111,12 +112,14 @@ impl ConnectionManager { ) -> Self { let ingest = SyncIngest::new(schema_provider.clone(), tx.clone()); let sync_manager = SyncManager::new(store.clone(), ingest, local_peer_id); + let scheduler = Ticker::new(UPDATE_INTERVAL); Self { peers: HashMap::new(), sync_manager, + scheduler, tx: tx.clone(), - rx: tx.subscribe(), + rx: BroadcastStream::new(tx.subscribe()), target_set, } } @@ -130,7 +133,7 @@ impl ConnectionManager { async fn on_connection_established(&mut self, peer_id: PeerId) { info!("Connected to peer: {peer_id}"); - + match self.peers.get(&peer_id) { Some(_) => { warn!("Peer already known: {peer_id}"); @@ -150,39 +153,28 @@ impl ConnectionManager { self.remove_connection(peer_id) } - async fn on_replication_message( - &mut self, - peer_id: PeerId, - message: SyncMessage, - ) { + async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { let session_id = message.session_id(); match self.sync_manager.handle_message(&peer_id, &message).await { Ok(result) => { for message in result.messages { self.send_service_message(ServiceMessage::SentReplicationMessage( - peer_id, - message, + peer_id, message, )); } if result.is_done { - self.on_replication_finished(peer_id, session_id) - .await; + self.on_replication_finished(peer_id, session_id).await; } } Err(err) => { - self.on_replication_error(peer_id, session_id, err) - .await; + self.on_replication_error(peer_id, session_id, err).await; } } } - async fn on_replication_finished( - &mut self, - peer_id: PeerId, - _session_id: SessionId, - ) { + async fn on_replication_finished(&mut self, peer_id: PeerId, _session_id: SessionId) { info!("Finished replication with peer {}", peer_id); match self.peers.get_mut(&peer_id) { Some(status) => { @@ -228,11 +220,7 @@ impl ConnectionManager { self.on_connection_closed(peer_id).await; } ServiceMessage::ReceivedReplicationMessage(peer_id, message) => { - self.on_replication_message(peer_id, message) - .await; - } - ServiceMessage::InitiateReplication => { - self.update_sessions().await; + self.on_replication_message(peer_id, message).await; } _ => (), // Ignore all other messages } @@ -269,7 +257,7 @@ impl ConnectionManager { .collect(); if attempt_peers.is_empty() { - info!("No peers available for replication") + debug!("No peers available for replication") } for peer_id in attempt_peers { @@ -299,22 +287,21 @@ impl ConnectionManager { pub async fn run(mut self) { loop { - match self.rx.recv().await { - Ok(message) => self.handle_service_message(message).await, - Err(err) => { - panic!("Service bus subscriber failed: {}", err); + tokio::select! 
{ + event = self.rx.next() => match event { + Some(Ok(message)) => self.handle_service_message(message).await, + Some(Err(err)) => { + panic!("Service bus subscriber for connection manager loop failed: {}", err); + } + // Command channel closed, thus shutting down the network event loop + None => { + return + }, + }, + Some(_) = self.scheduler.next() => { + self.update_sessions().await } } } } - - pub async fn start_scheduler(tx: ServiceSender) { - loop { - sleep(Duration::from_secs(5)).await; - if tx.send(ServiceMessage::InitiateReplication).is_err() { - // Silently fail here as we don't care if the message was received at this - // point - } - } - } } From 58a024e33e9b5c7dcea2e4ed596c85c20f6bdb4d Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 11:50:39 +0200 Subject: [PATCH 075/126] Dynamically retrieve target set when starting replication --- aquadoggo/src/replication/service.rs | 44 +++++++++++++--------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 9d55de152..99bc7289a 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -40,24 +40,9 @@ pub async fn replication_service( .peer_id .expect("Peer id needs to be given"); - // Define set of schema ids we are interested in - let supported_schema_ids: Vec = context - .schema_provider - .all() - .await - .iter() - .map(|schema| schema.id().to_owned()) - .collect(); - let target_set = TargetSet::new(&supported_schema_ids); - // Run a connection manager which deals with the replication logic - let manager = ConnectionManager::new( - &context.schema_provider, - &context.store, - &tx, - local_peer_id, - target_set, - ); + let manager = + ConnectionManager::new(&context.schema_provider, &context.store, &tx, local_peer_id); let handle = task::spawn(manager.run()); @@ -99,7 +84,7 @@ struct ConnectionManager { scheduler: Ticker, tx: ServiceSender, rx: BroadcastStream, - target_set: TargetSet, + schema_provider: SchemaProvider, } impl ConnectionManager { @@ -108,7 +93,6 @@ impl ConnectionManager { store: &SqlStore, tx: &ServiceSender, local_peer_id: PeerId, - target_set: TargetSet, ) -> Self { let ingest = SyncIngest::new(schema_provider.clone(), tx.clone()); let sync_manager = SyncManager::new(store.clone(), ingest, local_peer_id); @@ -120,10 +104,22 @@ impl ConnectionManager { scheduler, tx: tx.clone(), rx: BroadcastStream::new(tx.subscribe()), - target_set, + schema_provider: schema_provider.clone(), } } + /// Define set of schema ids we are interested in. 
+    async fn target_set(&self) -> TargetSet {
+        let supported_schema_ids: Vec<SchemaId> = self
+            .schema_provider
+            .all()
+            .await
+            .iter()
+            .map(|schema| schema.id().to_owned())
+            .collect();
+        TargetSet::new(&supported_schema_ids)
+    }
+
     fn remove_connection(&mut self, peer_id: PeerId) {
         match self.peers.remove(&peer_id) {
             Some(_) => debug!("Remove peer: {peer_id}"),
@@ -176,6 +172,7 @@ impl ConnectionManager {
 
     async fn on_replication_finished(&mut self, peer_id: PeerId, _session_id: SessionId) {
         info!("Finished replication with peer {}", peer_id);
+
         match self.peers.get_mut(&peer_id) {
             Some(status) => {
                 status.successful_count += 1;
@@ -266,16 +263,17 @@ impl ConnectionManager {
     }
 
     async fn initiate_replication(&mut self, peer_id: &PeerId) {
+        let target_set = self.target_set().await;
+
         match self
             .sync_manager
-            .initiate_session(peer_id, &self.target_set, &Mode::Naive)
+            .initiate_session(peer_id, &target_set, &Mode::Naive)
             .await
         {
             Ok(messages) => {
                 for message in messages {
                     self.send_service_message(ServiceMessage::SentReplicationMessage(
-                        peer_id.clone(),
-                        message,
+                        *peer_id, message,
                     ));
                 }
             }

From fc5d45bb5dd8b993d4548d7658ac91065ad7776f Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Fri, 2 Jun 2023 14:39:51 +0100
Subject: [PATCH 076/126] Add some more doc strings

---
 aquadoggo/src/replication/service.rs | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs
index 99bc7289a..3e48fc4e4 100644
--- a/aquadoggo/src/replication/service.rs
+++ b/aquadoggo/src/replication/service.rs
@@ -22,7 +22,10 @@ use crate::replication::{
 };
 use crate::schema::SchemaProvider;
 
-const MAX_SESSIONS_PER_CONNECTION: usize = 3;
+/// Maximum number of replication sessions per peer.
+const MAX_SESSIONS_PER_PEER: usize = 3;
+
+/// How often the scheduler checks for initiating replication sessions with peers.
 const UPDATE_INTERVAL: Duration = Duration::from_secs(5);
 
 pub async fn replication_service(
@@ -31,7 +34,6 @@ pub async fn replication_service(
     tx: ServiceSender,
     tx_ready: ServiceReadySender,
 ) -> Result<()> {
-    // Subscribe to communication bus
     let _rx = tx.subscribe();
 
     let local_peer_id = context
@@ -43,7 +45,6 @@ pub async fn replication_service(
     // Run a connection manager which deals with the replication logic
     let manager =
         ConnectionManager::new(&context.schema_provider, &context.store, &tx, local_peer_id);
-
     let handle = task::spawn(manager.run());
 
     if tx_ready.send(()).is_err() {
@@ -79,11 +80,23 @@ impl PeerStatus {
     }
 }
 
 struct ConnectionManager {
+    /// List of peers the connection manager knows about and which are available for replication.
     peers: HashMap<PeerId, PeerStatus>,
+
+    /// Replication state manager, data ingest and message generator for handling all replication
+    /// logic.
     sync_manager: SyncManager<PeerId>,
+
+    /// Async stream giving us a regular interval to initiate new replication sessions.
     scheduler: Ticker,
+
+    /// Sender for messages to other services.
     tx: ServiceSender,
+
+    /// Receiver for messages from other services, for example the networking layer.
     rx: BroadcastStream<ServiceMessage>,
+
+    /// Provider to retrieve our currently supported schema ids.
     schema_provider: SchemaProvider,
 }
@@ -108,7 +121,7 @@ impl ConnectionManager {
         }
     }
 
-    /// Define set of schema ids we are interested in.
+    /// Returns set of schema ids we are interested in and support on this node.
async fn target_set(&self) -> TargetSet { let supported_schema_ids: Vec = self .schema_provider @@ -244,7 +257,7 @@ impl ConnectionManager { .collect(); // Check if we're running too many sessions with that peer on this connection already - if active_sessions.len() < MAX_SESSIONS_PER_CONNECTION { + if active_sessions.len() < MAX_SESSIONS_PER_PEER { Some(peer_id) } else { debug!("Max sessions reached for peer: {:?}", peer_id); From 7bb4cb2c5f394a9583e27e0a516ab17b6095624a Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 12:00:56 +0200 Subject: [PATCH 077/126] Fix formatting --- .../src/network/replication/behaviour.rs | 23 ++++--------------- 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index dcc6011ad..8e192c784 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -40,11 +40,7 @@ impl Behaviour { } impl Behaviour { - pub fn send_message( - &mut self, - peer_id: PeerId, - message: SyncMessage, - ) { + pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { trace!( "Notify handler of sent sync message: {peer_id} {}", message.display() @@ -56,19 +52,14 @@ impl Behaviour { }); } - fn handle_received_message( - &mut self, - peer_id: &PeerId, - message: SyncMessage, - ) { + fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { trace!( "Notify swarm of received sync message: {peer_id} {}", message.display() ); self.events .push_back(ToSwarm::GenerateEvent(Event::MessageReceived( - *peer_id, - message, + *peer_id, message, ))); } } @@ -120,9 +111,7 @@ impl NetworkBehaviour for Behaviour { }) => { if remaining_established == 0 { self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected( - peer_id, - ))); + .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer_id))); } } FromSwarm::ConnectionEstablished(ConnectionEstablished { @@ -138,9 +127,7 @@ impl NetworkBehaviour for Behaviour { ); } else { self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerConnected( - peer_id, - ))); + .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer_id))); } } FromSwarm::AddressChange(_) From e550d531d4ce0e620dc8c2262a50fddaef4711e8 Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 12:16:28 +0200 Subject: [PATCH 078/126] Fix missing peer id in e2e test --- aquadoggo/src/config.rs | 19 +++++++++++++++++++ aquadoggo/src/network/identity.rs | 10 +++------- aquadoggo/src/network/mod.rs | 2 +- aquadoggo/src/tests.rs | 5 +---- 4 files changed, 24 insertions(+), 12 deletions(-) diff --git a/aquadoggo/src/config.rs b/aquadoggo/src/config.rs index 228960c69..eb3aa62cd 100644 --- a/aquadoggo/src/config.rs +++ b/aquadoggo/src/config.rs @@ -99,9 +99,28 @@ impl Configuration { }; // Derive peer id from key pair + // @TODO: This needs refactoring: https://github.com/p2panda/aquadoggo/issues/388 let key_pair = NetworkConfiguration::load_or_generate_key_pair(config.base_path.clone())?; config.network.set_peer_id(&key_pair.public()); Ok(config) } } + +#[cfg(test)] +impl Configuration { + /// Returns a new configuration object for a node which stores all data temporarily in memory. 
+ pub fn new_ephemeral() -> Self { + let mut config = Configuration { + database_url: Some("sqlite::memory:".to_string()), + ..Default::default() + }; + + // Generate a random key pair and just keep it in memory + // @TODO: This needs refactoring: https://github.com/p2panda/aquadoggo/issues/388 + let key_pair: libp2p::identity::Keypair = crate::network::identity::Identity::new(); + config.network.set_peer_id(&key_pair.public()); + + config + } +} diff --git a/aquadoggo/src/network/identity.rs b/aquadoggo/src/network/identity.rs index 9f49909e1..a5594deef 100644 --- a/aquadoggo/src/network/identity.rs +++ b/aquadoggo/src/network/identity.rs @@ -2,7 +2,7 @@ use std::fs; use std::fs::File; -use std::io::prelude::*; +use std::io::{Read, Write}; use std::os::unix::fs::PermissionsExt; use std::path::Path; @@ -27,6 +27,8 @@ pub trait Identity { Self: Sized; } +// @TODO: This should use our p2panda `KeyPair` type and in general be handled outside the libp2p +// context. Related issue: https://github.com/p2panda/aquadoggo/issues/388 impl Identity for Keypair { /// Generate a new Ed25519 key pair. fn new() -> Self { @@ -47,11 +49,9 @@ impl Identity for Keypair { // See: https://github.com/p2panda/aquadoggo/issues/295 #[allow(deprecated)] fn save(&self, path: &Path) -> Result<()> { - // Retrieve the private key from the key pair let private_key = match self { Keypair::Ed25519(key_pair) => key_pair.secret(), }; - // Encode the private key let encoded_private_key = hex::encode(private_key); fs::create_dir_all(path.parent().unwrap())?; @@ -73,16 +73,12 @@ impl Identity for Keypair { where Self: Sized, { - // Read the key pair from file let mut file = File::open(path)?; let mut contents = String::new(); file.read_to_string(&mut contents)?; - // Decode the private key let private_key_bytes = hex::decode(contents)?; - // Convert the private key bytes into a `SecretKey` let private_key = ed25519::SecretKey::from_bytes(private_key_bytes)?; - // Derive a key pair from the private key let key_pair = Keypair::Ed25519(private_key.into()); Ok(key_pair) diff --git a/aquadoggo/src/network/mod.rs b/aquadoggo/src/network/mod.rs index dcb56bf17..7b34116b5 100644 --- a/aquadoggo/src/network/mod.rs +++ b/aquadoggo/src/network/mod.rs @@ -2,7 +2,7 @@ mod behaviour; mod config; -mod identity; +pub mod identity; mod replication; mod service; mod swarm; diff --git a/aquadoggo/src/tests.rs b/aquadoggo/src/tests.rs index 613c46bf9..57718facf 100644 --- a/aquadoggo/src/tests.rs +++ b/aquadoggo/src/tests.rs @@ -43,10 +43,7 @@ async fn e2e() { // default options. The only thing we want to do change is the database config. We want an // in-memory sqlite database for this test. - let config = Configuration { - database_url: Some("sqlite::memory:".to_string()), - ..Default::default() - }; + let config = Configuration::new_ephemeral(); // Start the node. 
// From bbc61402c1a6ef63ad9269bc223b5dc2218426c4 Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 12:33:30 +0200 Subject: [PATCH 079/126] Remove unnecessary type casting in entry SQL --- aquadoggo/src/db/stores/entry.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index 7b2d47412..c1c2001d4 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -299,7 +299,7 @@ impl EntryStore for SqlStore { WHERE public_key = $1 AND log_id = $2 - AND CAST(seq_num AS NUMERIC) IN ({}) + AND seq_num IN ({}) ORDER BY CAST(seq_num AS NUMERIC) DESC ", @@ -336,7 +336,7 @@ impl SqlStore { WHERE logs.schema = $1 GROUP BY - entries.public_key, CAST(entries.log_id AS NUMERIC) + entries.public_key, entries.log_id ORDER BY entries.public_key, CAST(entries.log_id AS NUMERIC) ", From 996f95e48f5db0b3ae8b46bc4d74219c253be42c Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 12:44:46 +0200 Subject: [PATCH 080/126] Give error logging more context --- aquadoggo/src/network/replication/handler.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index c6f3f26ba..c6187bfcd 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -213,7 +213,8 @@ impl ConnectionHandler for Handler { )); } Poll::Ready(Some(Err(err))) => { - warn!("{err:#?}"); + warn!("Error decoding inbound message: {err:#?}"); + // More serious errors, close this side of the stream. If the peer is // still around, they will re-establish their connection self.inbound_substream = @@ -235,11 +236,10 @@ impl ConnectionHandler for Handler { match Sink::poll_close(Pin::new(&mut substream), cx) { Poll::Ready(res) => { if res.is_err() { - warn!("{res:#?}") // Don't close the connection but just drop the inbound substream. // In case the remote has more to send, they will open up a new // substream. 
- // @TODO: Log error here + warn!("Error during closing inbound connection: {res:#?}") } self.inbound_substream = None; @@ -295,7 +295,8 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(err) => { - warn!("{err:#?}"); + warn!("Error sending outbound message: {err:#?}"); + return Poll::Ready(ConnectionHandlerEvent::Close( HandlerError::Codec(err), )); @@ -303,7 +304,8 @@ impl ConnectionHandler for Handler { } } Poll::Ready(Err(err)) => { - warn!("{err:#?}"); + warn!("Error encoding outbound message: {err:#?}"); + return Poll::Ready(ConnectionHandlerEvent::Close( HandlerError::Codec(err), )); @@ -322,7 +324,8 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(err)) => { - warn!("{err:#?}"); + warn!("Error flushing outbound message: {err:#?}"); + return Poll::Ready(ConnectionHandlerEvent::Close( HandlerError::Codec(err), )); From 2969a912187b27ea07b4db2269e63dbbb9c31ae7 Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 12:46:57 +0200 Subject: [PATCH 081/126] Fix SQL query by making seq_num IN values a string --- aquadoggo/src/db/stores/entry.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index c1c2001d4..5bb7f39d4 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -277,14 +277,14 @@ impl EntryStore for SqlStore { log_id: &LogId, initial_seq_num: &SeqNum, ) -> Result, EntryStorageError> { + // Formatting query string in this way as `sqlx` currently doesn't support binding list + // arguments for IN queries. let cert_pool_seq_nums = get_lipmaa_links_back_to(initial_seq_num.as_u64(), 1) .iter() - .map(|seq_num| seq_num.to_string()) + .map(|seq_num| format!("\"{}\"", seq_num)) .collect::>() .join(","); - // Formatting query string in this way as `sqlx` currently - // doesn't support binding list arguments for IN queries. let sql_str = format!( "SELECT public_key, From a48bce7d0622e90bbd092d348b4f75277a038f5c Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 13:52:45 +0200 Subject: [PATCH 082/126] Try different string literal --- aquadoggo/src/db/stores/entry.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index 5bb7f39d4..e9c3d7978 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -281,7 +281,7 @@ impl EntryStore for SqlStore { // arguments for IN queries. 
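        // (Illustration with made-up values: a certificate pool of [1, 2, 4] would
        // interpolate to `AND seq_num IN ('1','2','4')`. The quoting assumes that
        // `seq_num` is stored as a text column -- note how the surrounding queries
        // `CAST(seq_num AS NUMERIC)` wherever numeric ordering is required.)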
let cert_pool_seq_nums = get_lipmaa_links_back_to(initial_seq_num.as_u64(), 1) .iter() - .map(|seq_num| format!("\"{}\"", seq_num)) + .map(|seq_num| format!("'{seq_num}'")) .collect::>() .join(","); From 19bf8659fcd923c2a262cb40327ce8dbd6006bbd Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 13:57:54 +0200 Subject: [PATCH 083/126] Use IntervalStream from tokio for scheduler --- Cargo.lock | 12 ------------ aquadoggo/Cargo.toml | 1 - aquadoggo/src/replication/service.rs | 8 ++++---- 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81f11f228..ec634c091 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -180,7 +180,6 @@ dependencies = [ "env_logger", "envy", "futures", - "futures-ticker", "hex", "http", "hyper", @@ -1789,17 +1788,6 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" -[[package]] -name = "futures-ticker" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" -dependencies = [ - "futures", - "futures-timer", - "instant", -] - [[package]] name = "futures-timer" version = "3.0.2" diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index 025990dcd..0745042cd 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -31,7 +31,6 @@ directories = "4.0.1" dynamic-graphql = "0.7.3" envy = "0.4.2" futures = "0.3.23" -futures-ticker = "0.0.3" hex = "0.4.3" http = "0.2.9" libp2p = { version = "0.51.3", features = [ diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 3e48fc4e4..142f134eb 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -4,12 +4,12 @@ use std::collections::HashMap; use std::time::Duration; use anyhow::Result; -use futures_ticker::Ticker; use libp2p::PeerId; use log::{debug, info, warn}; use p2panda_rs::schema::SchemaId; use tokio::task; -use tokio_stream::wrappers::BroadcastStream; +use tokio::time::interval; +use tokio_stream::wrappers::{BroadcastStream, IntervalStream}; use tokio_stream::StreamExt; use crate::bus::{ServiceMessage, ServiceSender}; @@ -88,7 +88,7 @@ struct ConnectionManager { sync_manager: SyncManager, /// Async stream giving us a regular interval to initiate new replication sessions. - scheduler: Ticker, + scheduler: IntervalStream, /// Receiver for messages from other services, for example the networking layer. tx: ServiceSender, @@ -109,7 +109,7 @@ impl ConnectionManager { ) -> Self { let ingest = SyncIngest::new(schema_provider.clone(), tx.clone()); let sync_manager = SyncManager::new(store.clone(), ingest, local_peer_id); - let scheduler = Ticker::new(UPDATE_INTERVAL); + let scheduler = IntervalStream::new(interval(UPDATE_INTERVAL)); Self { peers: HashMap::new(), From 722a4b3c623bdc62c7521dfb0ff44ce049c64e5b Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 14:24:59 +0200 Subject: [PATCH 084/126] Add doc strings --- aquadoggo/src/network/replication/behaviour.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 8e192c784..38dad2f5c 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -21,8 +21,10 @@ pub enum Event { /// Replication message received on the inbound stream. 
MessageReceived(PeerId, SyncMessage), + /// We established an inbound or outbound connection to a peer for the first time. PeerConnected(PeerId), + /// Peer does not have any inbound or outbound connections left with us. PeerDisconnected(PeerId), } From ba0548485a4f5b09c1d333e789d30e6239682130 Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 3 Jun 2023 16:41:29 +0200 Subject: [PATCH 085/126] Fix filtering active sessions logic --- aquadoggo/src/replication/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 142f134eb..d4968fa32 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -253,7 +253,7 @@ impl ConnectionManager { let sessions = self.sync_manager.get_sessions(&peer_id); let active_sessions: Vec<&Session> = sessions .iter() - .filter(|session| session.is_done()) + .filter(|session| !session.is_done()) .collect(); // Check if we're running too many sessions with that peer on this connection already From 84587f29255159b9c78c07d4a0045f9850952812 Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 4 Jun 2023 09:44:12 +0200 Subject: [PATCH 086/126] Update comments --- aquadoggo/src/replication/manager.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index b321b78be..0f9e307ac 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -203,8 +203,8 @@ where existing_session: &Session, ) -> Result { match existing_session.local { - // Remote peer sent a sync request for an already pending inbound session, we should ignore - // this second request. + // Remote peer sent a sync request for an already pending inbound session, we should + // ignore this second request. false => Err(DuplicateSessionRequestError::InboundPendingSession( existing_session.id, )), @@ -277,8 +277,8 @@ where existing_session: &Session, ) -> Result { match existing_session.local { - // Remote peer sent a sync request for an already pending inbound session, we should ignore - // this second request. + // Remote peer sent a sync request for an already pending inbound session, we should + // ignore this second request. false => Err(DuplicateSessionRequestError::InboundExistingTargetSet( existing_session.target_set(), )), @@ -344,9 +344,10 @@ where let sessions = self.get_sessions(remote_peer); - // Check if a session with this id already exists for this peer, this can happen if both - // peers started to initiate a session at the same time, or if the remote peer sent two - // sync request messages with the same session id. + // Check if a session with this id already exists for this peer. + // + // This can happen if both peers started to initiate a session at the same time, or if the + // remote peer sent two sync request messages with the same session id. if let Some(existing_session) = sessions .iter() .find(|existing_session| existing_session.id == *session_id) @@ -357,8 +358,7 @@ where .await; } - // Check if a session with this target set already exists for this peer, this always gets - // rejected because it is clearly redundant + // Check if a session with this target set already exists for this peer. 
if let Some(session) = sessions
 .iter()
 .find(|session| session.target_set() == *target_set)

From 8d242686ae0e8544e7270b09db8f9f224751c2f5 Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 4 Jun 2023 09:44:58 +0200
Subject: [PATCH 087/126] Remove repeating debug log

---
 aquadoggo/src/replication/manager.rs | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index 0f9e307ac..59b388d4e 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -244,10 +244,6 @@ where
 let mut all_messages: Vec<SyncMessage> = vec![];

 if accept_inbound_request {
- debug!(
- "Accept duplicate session request with id {} for peer {:?}",
- existing_session.id, remote_peer
- );
 let messages = self
 .insert_and_initialize_session(
 remote_peer,

From 50425933461bed5ba34607581a373ee8062a4c22 Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 4 Jun 2023 09:47:36 +0200
Subject: [PATCH 088/126] Re-initiate dropped session if it's concerning a
 different target set

---
 aquadoggo/src/replication/manager.rs | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index 59b388d4e..f21e26016 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -255,8 +255,14 @@ where
 .await;
 all_messages.extend(to_sync_messages(existing_session.id, messages));

- // @TODO: Do we want to re-initiate the dropped session if it was concerning a
- // different target set?
+ // If we dropped our own outbound session request regarding a different target set, we
+ // need to re-establish it with another session id, otherwise it would get lost
+ if existing_session.target_set() != *target_set {
+ let messages = self
+ .initiate_session(remote_peer, target_set, &existing_session.mode())
+ .await?;
+ all_messages.extend(messages)
+ }
 }

 Ok(SyncResult {

From 79ceacc038fc4f1c0195abcf8332274e17a97b01 Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 4 Jun 2023 10:10:28 +0200
Subject: [PATCH 089/126] Allow max 3 sessions per peer and max one for the
 same target set

---
 aquadoggo/src/replication/service.rs | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs
index d4968fa32..ee6ad753c 100644
--- a/aquadoggo/src/replication/service.rs
+++ b/aquadoggo/src/replication/service.rs
@@ -244,6 +244,9 @@ impl ConnectionManager {
 }

 async fn update_sessions(&mut self) {
+ // Determine the target set our node is interested in
+ let target_set = self.target_set().await;
+
 // Iterate through all currently connected peers
 let attempt_peers: Vec<PeerId> = self
 .peers
 .clone()
 .into_iter()
 .filter_map(|(peer_id, _)| {
 let sessions = self.sync_manager.get_sessions(&peer_id);
+
+ // 1. Check if we're running too many sessions with that peer on this connection
+ // already. This limit is configurable.
 let active_sessions: Vec<&Session> = sessions
 .iter()
 .filter(|session| !session.is_done())
 .collect();

- // Check if we're running too many sessions with that peer on this connection already
- if active_sessions.len() < MAX_SESSIONS_PER_PEER {
+ // 2. Check if we already have at least one session concerning the same target
+ // set. Starting that session again would be considered an error.
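Taken together, the two numbered checks in this hunk form a small admission rule. A self-contained sketch under simplified assumptions (a hypothetical Session struct with string target sets; MAX_SESSIONS_PER_PEER = 3 follows the commit subject):

const MAX_SESSIONS_PER_PEER: usize = 3;

struct Session {
    target_set: String,
    done: bool,
}

/// Only initiate a new session with a peer if fewer than MAX_SESSIONS_PER_PEER
/// sessions are still active and none of them already covers the same target set.
fn may_initiate_session(sessions: &[Session], target_set: &str) -> bool {
    let active: Vec<&Session> = sessions.iter().filter(|session| !session.done).collect();
    let duplicate_target_set = active
        .iter()
        .any(|session| session.target_set == target_set);
    active.len() < MAX_SESSIONS_PER_PEER && !duplicate_target_set
}

fn main() {
    let sessions = vec![Session {
        target_set: "docs".to_string(),
        done: false,
    }];
    assert!(may_initiate_session(&sessions, "blogs"));
    assert!(!may_initiate_session(&sessions, "docs")); // Duplicate target set is rejected
}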
+ let has_active_target_set_session = active_sessions + .iter() + .any(|session| session.target_set() == target_set); + + if active_sessions.len() < MAX_SESSIONS_PER_PEER && !has_active_target_set_session { Some(peer_id) } else { - debug!("Max sessions reached for peer: {:?}", peer_id); + debug!("Max sessions reached for peer: {peer_id}"); None } }) @@ -271,16 +282,14 @@ impl ConnectionManager { } for peer_id in attempt_peers { - self.initiate_replication(&peer_id).await; + self.initiate_replication(&peer_id, &target_set).await; } } - async fn initiate_replication(&mut self, peer_id: &PeerId) { - let target_set = self.target_set().await; - + async fn initiate_replication(&mut self, peer_id: &PeerId, target_set: &TargetSet) { match self .sync_manager - .initiate_session(peer_id, &target_set, &Mode::Naive) + .initiate_session(peer_id, target_set, &Mode::Naive) .await { Ok(messages) => { From c7655f4ed1c701895f94d8028c36b17dceb6b164 Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 4 Jun 2023 11:57:18 +0200 Subject: [PATCH 090/126] Update test and fix bug in re-initiating session logic --- aquadoggo/src/replication/manager.rs | 164 ++++++++++++++++++++------- 1 file changed, 124 insertions(+), 40 deletions(-) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index f21e26016..be81fd306 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -259,7 +259,11 @@ where // need to re-establish it with another session id, otherwise it would get lost if existing_session.target_set() != *target_set { let messages = self - .initiate_session(remote_peer, target_set, &existing_session.mode()) + .initiate_session( + remote_peer, + &existing_session.target_set(), + &existing_session.mode(), + ) .await?; all_messages.extend(messages) } @@ -596,17 +600,25 @@ mod tests { // // ========== PEER A REQUEST DROPPED =========== // - // Have([..]) ─────────────────────────────────► + // 0 Have([..]) ───────────────────────────────► + // 0 SyncDone(false) ──────────────────────────► // - // Done(false) ───────────┐ - // │ - // ◄──────────────────────┼────────── Have([..]) - // │ - // ◄──────────────────────┼───────── Done(false) - // │ - // └────────────────────► + // ====== PEER A REPEATS WITH NEW SESS ID ====== // - // ============== SESSION CLOSED =============== + // SyncRequest(1, 0, ["A"])────────────────────► + // + // ◄─────────────────────────────── 0 Have([..]) + // ◄────────────────────────── 0 SyncDone(false) + // + // ============ SESSION 0 CLOSED =============== + // + // ◄─────────────────────────────── 1 Have([..]) + // ◄────────────────────────── 1 SyncDone(false) + // + // 1 Have([..]) ───────────────────────────────► + // 1 SyncDone(false) ──────────────────────────► + // + // ============ SESSION 1 CLOSED =============== #[rstest] fn concurrent_requests_duplicate_session_ids( #[from(random_target_set)] target_set_1: TargetSet, @@ -617,61 +629,77 @@ mod tests { let (tx, _rx) = broadcast::channel(8); let ingest = SyncIngest::new(SchemaProvider::default(), tx); - // Local peer id is < than remote, this is important for testing the deterministic - // handling of concurrent session requests which contain the same session id. + // Sanity check: Id of peer A is < id of peer B. + // + // This is important for testing the deterministic handling of concurrent session + // requests which contain the same session id. assert!(PEER_ID_LOCAL < PEER_ID_REMOTE); - // Local peer A initiates a session with id 0. 
+ // Sanity check: Target sets need to be different
+ assert!(target_set_1 != target_set_2);
+
+ // Local peer A initiates a session with id 0 and target set 1.
 let mut manager_a =
 SyncManager::new(node.context.store.clone(), ingest.clone(), PEER_ID_LOCAL);
 let result = manager_a
 .initiate_session(&PEER_ID_REMOTE, &target_set_1, &mode)
- .await;
+ .await
+ .unwrap();

- let sync_messages = result.unwrap();
- assert_eq!(sync_messages.len(), 1);
- let sync_request_a = sync_messages[0].clone();
+ assert_eq!(result.len(), 1);
+ let sync_request_a = result[0].clone();

- // Remote peer B initiates a session with id 0.
+ // Remote peer B initiates a session with id 0 and target set 2.
+ //
+ // Note that both peers use the _same_ session id but _different_ target sets.
 let mut manager_b = SyncManager::new(node.context.store.clone(), ingest, PEER_ID_REMOTE);
 let result = manager_b
 .initiate_session(&PEER_ID_LOCAL, &target_set_2, &mode)
- .await;
+ .await
+ .unwrap();

- let sync_messages = result.unwrap();
- assert_eq!(sync_messages.len(), 1);
- let sync_request_b = sync_messages[0].clone();
+ assert_eq!(result.len(), 1);
+ let sync_request_b = result[0].clone();

 // Both peers send and handle the requests concurrently.
 let result = manager_a
 .handle_message(&PEER_ID_REMOTE, &sync_request_b)
- .await;
- let response = result.unwrap();
+ .await
+ .unwrap();

- // We expect Peer A to drop their pending outgoing session and respond to the request
- // from Peer B.
- assert_eq!(response.messages.len(), 2);
- let (have_message_a, done_message_a) =
- (response.messages[0].clone(), response.messages[1].clone());
+ // We expect Peer A to:
+ //
+ // 1. Drop their pending outgoing session
+ // 2. Respond to the request from Peer B
+ // 3. Send another sync request for the other target set with a corrected session id
+ assert_eq!(result.messages.len(), 3);
+ let (have_message_a, done_message_a, sync_request_a_corrected) = (
+ result.messages[0].clone(),
+ result.messages[1].clone(),
+ result.messages[2].clone(),
+ );

 let result = manager_b
 .handle_message(&PEER_ID_LOCAL, &sync_request_a)
- .await;
- let response = result.unwrap();
+ .await
+ .unwrap();

- // We expect Peer B to drop the incomming request from Peer A and simply wait
- // for a response from it's original request.
- assert_eq!(response.messages.len(), 0);
+ // We expect Peer B to drop the incoming request from Peer A and simply wait for a
+ // response from its original request.
+ assert_eq!(result.messages.len(), 0);

- // Both peers have exactly one session running.
+ // Peer A has two sessions running: the one initiated by Peer B and the one it
+ // re-initiated itself with the new session id
 let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE);
- assert_eq!(manager_a_sessions.len(), 1);
+ assert_eq!(manager_a_sessions.len(), 2);

+ // Peer B still has just one running; it didn't learn about Peer A's re-initiated
+ // session yet
 let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL);
 assert_eq!(manager_b_sessions.len(), 1);

- // Peer B processes the `Have` and `SyncDone` messages from Peer A.
+ // Peer B processes the `Have`, `SyncDone` and `SyncRequest` messages from Peer A.
 let result = manager_b
 .handle_message(&PEER_ID_LOCAL, &have_message_a)
 .await;
@@ -689,8 +717,29 @@ mod tests {
 let response = result.unwrap();
 assert_eq!(response.messages.len(), 0);

- // Peer A processes both the `Have` and `SyncDone` messages from Peer B and produces
- // no new messages.
+ // Peer B should have closed the session for good + let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + assert_eq!(manager_b_sessions.len(), 0); + + // Now the second, re-established sync request from peer A concerning another target + // set arrives at peer B + let result = manager_b + .handle_message(&PEER_ID_LOCAL, &sync_request_a_corrected) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 2); + + // They send their own `Have` and `SyncDone` messages for the corrected target set + let (have_message_b_corrected, done_message_b_corrected) = + (response.messages[0].clone(), response.messages[1].clone()); + + // Peer B should now know about one session again + let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + assert_eq!(manager_b_sessions.len(), 1); + + // Peer A processes both the `Have` and `SyncDone` messages from Peer B for the first + // session and produces no new messages. We're done with this session on Peer A as + // well now. let result = manager_a .handle_message(&PEER_ID_REMOTE, &have_message_b) .await; @@ -703,6 +752,41 @@ mod tests { let response = result.unwrap(); assert_eq!(response.messages.len(), 0); + // Peer A should now know about one session again + let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + assert_eq!(manager_a_sessions.len(), 1); + + // Peer A processes both the re-initiated sessions `Have` and `SyncDone` messages from + // Peer B and produces its own answer. + let result = manager_a + .handle_message(&PEER_ID_REMOTE, &have_message_b_corrected) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 2); + + let (have_message_a_corrected, done_message_a_corrected) = + (response.messages[0].clone(), response.messages[1].clone()); + + let result = manager_a + .handle_message(&PEER_ID_REMOTE, &done_message_b_corrected) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + // Peer B processes both the re-initiated `Have` and `SyncDone` messages from Peer A + // and produces no new messages. + let result = manager_b + .handle_message(&PEER_ID_LOCAL, &have_message_a_corrected) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + + let result = manager_b + .handle_message(&PEER_ID_LOCAL, &done_message_a_corrected) + .await; + let response = result.unwrap(); + assert_eq!(response.messages.len(), 0); + // After processing all messages both peers should have no sessions remaining. let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); assert_eq!(manager_a_sessions.len(), 0); @@ -795,7 +879,7 @@ mod tests { .await; let response = result.unwrap(); - // We expect Peer B to drop the incomming request from Peer A and simply wait + // We expect Peer B to drop the incoming request from Peer A and simply wait // for a response from it's original request. 
assert_eq!(response.messages.len(), 0);

From a3f4b892ffdda1287b784eb0a2a79b6c686751bc Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 4 Jun 2023 17:05:39 +0200
Subject: [PATCH 091/126] Correct diagram

---
 aquadoggo/src/replication/manager.rs | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index be81fd306..2edf8fc4a 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -601,22 +601,30 @@ mod tests {
 // ========== PEER A REQUEST DROPPED ===========
 //
 // 0 Have([..]) ───────────────────────────────►
- // 0 SyncDone(false) ──────────────────────────►
 //
- // ====== PEER A REPEATS WITH NEW SESS ID ======
+ // 0 SyncDone(false) ─────┐
+ // │
+ // ◄──────────────────────┼──────── 0 Have([..])
+ // │
+ // ◄──────────────────────┼─── 0 SyncDone(false)
+ // │
+ // └────────────────────►
 //
- // SyncRequest(1, 0, ["A"])────────────────────►
+ // ============ SESSION 0 CLOSED ===============
 //
- // ◄─────────────────────────────── 0 Have([..])
- // ◄────────────────────────── 0 SyncDone(false)
+ // ====== PEER A REPEATS WITH NEW SESS ID ======
 //
- // ============ SESSION 0 CLOSED ===============
+ // SyncRequest(1, 0, ["A"])────────────────────►
 //
 // ◄─────────────────────────────── 1 Have([..])
- // ◄────────────────────────── 1 SyncDone(false)
 //
- // 1 Have([..]) ───────────────────────────────►
- // 1 SyncDone(false) ──────────────────────────►
+ // ┌─── 1 SyncDone(false)
+ // │
+ // 1 Have([..]) ──────────┼────────────────────►
+ // │
+ // 1 SyncDone(false) ─────┼────────────────────►
+ // │
+ // ◄──────────────────────┘
 //
 // ============ SESSION 1 CLOSED ===============
 #[rstest]

From ec8bdd2fb84c8218eac9f7d66bff300f15a93265 Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 4 Jun 2023 22:13:16 +0200
Subject: [PATCH 092/126] Inform connection handler about replication errors,
 introduce timeout

---
 aquadoggo/src/bus.rs | 3 ++
 .../src/network/replication/behaviour.rs | 31 +++++++----
 aquadoggo/src/network/replication/handler.rs | 53 ++++++++++++-------
 aquadoggo/src/network/service.rs | 16 ++++--
 aquadoggo/src/replication/service.rs | 9 ++--
 5 files changed, 72 insertions(+), 40 deletions(-)

diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs
index 7b4bf6863..bed340daf 100644
--- a/aquadoggo/src/bus.rs
+++ b/aquadoggo/src/bus.rs
@@ -26,4 +26,7 @@ pub enum ServiceMessage {

 /// Node received a message from remote node for replication.
 ReceivedReplicationMessage(PeerId, SyncMessage),
+
+ /// Replication protocol failed with a critical error.
+ ReplicationFailed(PeerId),
 }

diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs
index 38dad2f5c..88412f184 100644
--- a/aquadoggo/src/network/replication/behaviour.rs
+++ b/aquadoggo/src/network/replication/behaviour.rs
@@ -39,9 +39,18 @@ impl Behaviour {
 events: VecDeque::new(),
 }
 }
-}

-impl Behaviour {
+ fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) {
+ trace!(
+ "Notify swarm of received sync message: {peer_id} {}",
+ message.display()
+ );
+ self.events
+ .push_back(ToSwarm::GenerateEvent(Event::MessageReceived(
+ *peer_id, message,
+ )));
+ }
+
 pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) {
 trace!(
 "Notify handler of sent sync message: {peer_id} {}",
@@ -54,15 +63,15 @@ impl Behaviour {
 });
 }

- fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) {
- trace!(
- "Notify swarm of received sync message: {peer_id} {}",
- message.display()
- );
- self.events
- .push_back(ToSwarm::GenerateEvent(Event::MessageReceived(
- *peer_id, message,
- )));
+ /// React to errors coming from the replication protocol running inside the replication service.
+ pub fn handle_error(&mut self, peer_id: PeerId) {
+ self.events.push_back(ToSwarm::NotifyHandler {
+ peer_id,
+ event: HandlerInEvent::ReplicationError,
+ // Inform all connections related to that peer, this means that all of them (inbound or
+ // outbound) will be closed
+ handler: NotifyHandler::Any,
+ });
 }
 }

diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs
index c6187bfcd..5aaeb066a 100644
--- a/aquadoggo/src/network/replication/handler.rs
+++ b/aquadoggo/src/network/replication/handler.rs
@@ -3,6 +3,7 @@
 use std::collections::VecDeque;
 use std::pin::Pin;
 use std::task::{Context, Poll};
+use std::time::{Duration, Instant};

 use asynchronous_codec::Framed;
 use futures::{Sink, StreamExt};
@@ -16,6 +17,10 @@ use thiserror::Error;
 use crate::network::replication::{Codec, CodecError, Protocol};
 use crate::replication::SyncMessage;

+/// The time a connection to a peer is maintained without being in live mode and without
+/// sending or receiving any messages. Connections that idle beyond this timeout are disconnected.
+const IDLE_TIMEOUT: Duration = Duration::from_secs(30);
+
 pub struct Handler {
 /// Upgrade configuration for the replication protocol.
 listen_protocol: SubstreamProtocol<Protocol, ()>,
@@ -33,8 +38,11 @@ pub struct Handler {
 /// Queue of messages that we want to send to the remote.
 send_queue: VecDeque<SyncMessage>,

- /// Flag determining whether to maintain the connection to the peer.
- keep_alive: KeepAlive,
+ /// Last time we've observed inbound or outbound messaging activity.
+ last_io_activity: Instant,
+
+ /// Flag indicating if a critical replication error occurred.
+ critical_error: bool,
 }

 impl Handler {
@@ -45,7 +53,8 @@ impl Handler {
 inbound_substream: None,
 outbound_substream_establishing: false,
 send_queue: VecDeque::new(),
- keep_alive: KeepAlive::Yes,
+ last_io_activity: Instant::now(),
+ critical_error: false,
 }
 }

@@ -75,6 +84,9 @@ impl Handler {
 pub enum HandlerInEvent {
 /// Replication message to send on outbound stream.
 Message(SyncMessage),
+
+ /// Replication protocol failed with an error.
+ ReplicationError,
 }

 /// The event emitted by the connection handler.
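The keep-alive policy that the next hunk wires into `connection_keep_alive` reduces to two inputs: the `critical_error` flag and the `last_io_activity` timestamp. A minimal sketch, returning a plain bool instead of libp2p's `KeepAlive` enum:

use std::time::{Duration, Instant};

const IDLE_TIMEOUT: Duration = Duration::from_secs(30);

/// Drop the connection immediately after a critical replication error,
/// otherwise keep it alive until no message has been sent or received
/// for IDLE_TIMEOUT.
fn keep_connection(critical_error: bool, last_io_activity: Instant) -> bool {
    if critical_error {
        return false;
    }
    last_io_activity.elapsed() < IDLE_TIMEOUT
}

fn main() {
    let just_now = Instant::now();
    assert!(keep_connection(false, just_now));
    assert!(!keep_connection(true, just_now));
}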
@@ -159,17 +171,23 @@ impl ConnectionHandler for Handler { } fn on_behaviour_event(&mut self, event: Self::InEvent) { - self.keep_alive = KeepAlive::Yes; - match event { HandlerInEvent::Message(message) => { self.send_queue.push_back(message); } + HandlerInEvent::ReplicationError => { + self.critical_error = true; + } } } fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + // Close connection immediately on critical errors coming from replication service + if self.critical_error { + return KeepAlive::No; + } + + KeepAlive::Until(self.last_io_activity + IDLE_TIMEOUT) } fn poll( @@ -203,17 +221,18 @@ impl ConnectionHandler for Handler { Some(InboundSubstreamState::WaitingInput(mut substream)) => { match substream.poll_next_unpin(cx) { Poll::Ready(Some(Ok(message))) => { + self.last_io_activity = Instant::now(); + // Received message from remote peer self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); - self.keep_alive = KeepAlive::Yes; return Poll::Ready(ConnectionHandlerEvent::Custom( HandlerOutEvent::Message(message), )); } Poll::Ready(Some(Err(err))) => { - warn!("Error decoding inbound message: {err:#?}"); + warn!("Error decoding inbound message: {err}"); // More serious errors, close this side of the stream. If the peer is // still around, they will re-establish their connection @@ -235,19 +254,13 @@ impl ConnectionHandler for Handler { Some(InboundSubstreamState::Closing(mut substream)) => { match Sink::poll_close(Pin::new(&mut substream), cx) { Poll::Ready(res) => { - if res.is_err() { + if let Err(err) = res { // Don't close the connection but just drop the inbound substream. // In case the remote has more to send, they will open up a new // substream. - warn!("Error during closing inbound connection: {res:#?}") + warn!("Error during closing inbound connection: {err}") } - self.inbound_substream = None; - - if self.outbound_substream.is_none() { - self.keep_alive = KeepAlive::No; - } - break; } Poll::Pending => { @@ -278,6 +291,7 @@ impl ConnectionHandler for Handler { Some(message) => { self.outbound_substream = Some(OutboundSubstreamState::PendingSend(substream, message)); + continue; } None => { self.outbound_substream = @@ -295,7 +309,7 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(err) => { - warn!("Error sending outbound message: {err:#?}"); + warn!("Error sending outbound message: {err}"); return Poll::Ready(ConnectionHandlerEvent::Close( HandlerError::Codec(err), @@ -304,7 +318,7 @@ impl ConnectionHandler for Handler { } } Poll::Ready(Err(err)) => { - warn!("Error encoding outbound message: {err:#?}"); + warn!("Error encoding outbound message: {err}"); return Poll::Ready(ConnectionHandlerEvent::Close( HandlerError::Codec(err), @@ -320,11 +334,12 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::PendingFlush(mut substream)) => { match Sink::poll_flush(Pin::new(&mut substream), cx) { Poll::Ready(Ok(())) => { + self.last_io_activity = Instant::now(); self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(err)) => { - warn!("Error flushing outbound message: {err:#?}"); + warn!("Error flushing outbound message: {err}"); return Poll::Ready(ConnectionHandlerEvent::Close( HandlerError::Codec(err), diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 89bc8a7fe..d4044cfaa 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -158,11 +158,17 @@ impl EventLoop { 
/// Handle an incoming message via the communication bus from other services. async fn handle_service_message(&mut self, message: ServiceMessage) { - if let ServiceMessage::SentReplicationMessage(peer_id, sync_message) = message { - self.swarm - .behaviour_mut() - .replication - .send_message(peer_id, sync_message); + match message { + ServiceMessage::SentReplicationMessage(peer_id, sync_message) => { + self.swarm + .behaviour_mut() + .replication + .send_message(peer_id, sync_message); + } + ServiceMessage::ReplicationFailed(peer_id) => { + self.swarm.behaviour_mut().replication.handle_error(peer_id); + } + _ => (), } } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index ee6ad753c..331e9f507 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -42,7 +42,6 @@ pub async fn replication_service( .peer_id .expect("Peer id needs to be given"); - // Run a connection manager which deals with the replication logic let manager = ConnectionManager::new(&context.schema_provider, &context.store, &tx, local_peer_id); let handle = task::spawn(manager.run()); @@ -53,10 +52,7 @@ pub async fn replication_service( tokio::select! { _ = handle => (), - _ = shutdown => { - // @TODO: Wait until all pending replication processes are completed during graceful - // shutdown - } + _ = shutdown => (), } Ok(()) @@ -219,6 +215,9 @@ impl ConnectionManager { } _ => (), // Don't try and close the session on other errors as it should not have been initiated } + + // Inform network service about error, so it can accordingly react + self.send_service_message(ServiceMessage::ReplicationFailed(peer_id)); } async fn handle_service_message(&mut self, message: ServiceMessage) { From a5fc9323ca050f809095d8dcb3458db17d083d25 Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 4 Jun 2023 23:45:32 +0200 Subject: [PATCH 093/126] Close all connection handlers on critical errors --- .../src/network/replication/behaviour.rs | 12 +---- aquadoggo/src/network/replication/handler.rs | 45 ++++++++++++++----- aquadoggo/src/network/service.rs | 7 ++- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 88412f184..483c7a6c3 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -10,7 +10,7 @@ use libp2p::swarm::{ PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; -use log::{trace, warn}; +use log::trace; use p2panda_rs::Human; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; @@ -68,8 +68,6 @@ impl Behaviour { self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::ReplicationError, - // Inform all connections related to that peer, this means that all of them (inbound or - // outbound) will be closed handler: NotifyHandler::Any, }); } @@ -130,13 +128,7 @@ impl NetworkBehaviour for Behaviour { other_established, .. 
}) => { - if other_established > 0 { - warn!( - "Multiple connections established to peer: {} {}", - other_established + 1, - peer_id - ); - } else { + if other_established == 0 { self.events .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer_id))); } diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index 5aaeb066a..70ba09d55 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -41,7 +41,10 @@ pub struct Handler { /// Last time we've observed inbound or outbound messaging activity. last_io_activity: Instant, - /// Flag indicating if a critical replication error occurred. + /// Flag indicating that we want to close _all_ connection handlers related to that peer. + /// + /// This is useful in scenarios where a critical error occurred outside of the libp2p stack + /// (for example in the replication service) and we need to accordingly close all connections. critical_error: bool, } @@ -102,6 +105,12 @@ pub enum HandlerOutEvent { pub enum HandlerError { #[error("Failed to encode or decode CBOR")] Codec(#[from] CodecError), + + #[error("Critical replication protocol error")] + ReplicationError, + + #[error("Remote peer closed connection")] + RemotePeerDisconnected, } type Stream = Framed; @@ -182,9 +191,11 @@ impl ConnectionHandler for Handler { } fn connection_keep_alive(&self) -> KeepAlive { - // Close connection immediately on critical errors coming from replication service - if self.critical_error { - return KeepAlive::No; + if let Some( + OutboundSubstreamState::PendingSend(_, _) | OutboundSubstreamState::PendingFlush(_), + ) = self.outbound_substream + { + return KeepAlive::Yes; } KeepAlive::Until(self.last_io_activity + IDLE_TIMEOUT) @@ -201,6 +212,14 @@ impl ConnectionHandler for Handler { Self::Error, >, > { + if self.critical_error { + // Returning a `Close` event will inform all other handlers to close their connections + // to that peer + return Poll::Ready(ConnectionHandlerEvent::Close( + HandlerError::ReplicationError, + )); + } + // Determine if we need to create the outbound stream if !self.send_queue.is_empty() && self.outbound_substream.is_none() @@ -234,8 +253,8 @@ impl ConnectionHandler for Handler { Poll::Ready(Some(Err(err))) => { warn!("Error decoding inbound message: {err}"); - // More serious errors, close this side of the stream. If the peer is - // still around, they will re-establish their connection + // Close this side of the stream. If the peer is still around, they + // will re-establish their connection self.inbound_substream = Some(InboundSubstreamState::Closing(substream)); } @@ -255,13 +274,17 @@ impl ConnectionHandler for Handler { match Sink::poll_close(Pin::new(&mut substream), cx) { Poll::Ready(res) => { if let Err(err) = res { - // Don't close the connection but just drop the inbound substream. - // In case the remote has more to send, they will open up a new - // substream. 
warn!("Error during closing inbound connection: {err}") } + self.inbound_substream = None; - break; + + // Close all connection handlers because we can assume that the remote + // peer actively closed an existing connection and probably went + // offline + return Poll::Ready(ConnectionHandlerEvent::Close( + HandlerError::RemotePeerDisconnected, + )); } Poll::Pending => { self.inbound_substream = @@ -291,6 +314,8 @@ impl ConnectionHandler for Handler { Some(message) => { self.outbound_substream = Some(OutboundSubstreamState::PendingSend(substream, message)); + + // Continue loop in case there is more messages to be sent continue; } None => { diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index d4044cfaa..0e6c9117c 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -184,11 +184,10 @@ impl EventLoop { SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), SwarmEvent::ConnectionEstablished { peer_id, - endpoint, num_established, .. } => { - info!("ConnectionEstablished: {peer_id} {endpoint:?} {num_established}"); + info!("Established new connection (total {num_established}) with {peer_id}"); // Match on a connection with the rendezvous server if let Some(rendezvous_peer_id) = self.network_config.rendezvous_peer_id { @@ -210,11 +209,11 @@ impl EventLoop { } SwarmEvent::ConnectionClosed { peer_id, - endpoint, num_established, cause, + .. } => { - info!("ConnectionClosed: {peer_id} {endpoint:?} {num_established} {cause:?}"); + info!("Connection closed (total {num_established}) with {peer_id}: {cause:?}"); } SwarmEvent::ExpiredListenAddr { listener_id, From f7b894eaf82d8eff9072a6cf78bed11dec8aaada Mon Sep 17 00:00:00 2001 From: adz Date: Mon, 5 Jun 2023 10:22:11 +0200 Subject: [PATCH 094/126] Fix import style --- aquadoggo/src/network/replication/behaviour.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 483c7a6c3..ab1807f8d 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -167,10 +167,8 @@ mod tests { use p2panda_rs::schema::SchemaId; use rstest::rstest; - use crate::{ - replication::{Message, SyncMessage, TargetSet}, - test_utils::helpers::random_target_set, - }; + use crate::replication::{Message, SyncMessage, TargetSet}; + use crate::test_utils::helpers::random_target_set; use super::{Behaviour as ReplicationBehaviour, Event}; From 7f24e41c0473349d3844b719af1c8600783fad2b Mon Sep 17 00:00:00 2001 From: adz Date: Mon, 5 Jun 2023 10:24:36 +0200 Subject: [PATCH 095/126] Fix import style --- aquadoggo/src/replication/strategies/diff.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs index 3a3f70b90..c90e5c088 100644 --- a/aquadoggo/src/replication/strategies/diff.rs +++ b/aquadoggo/src/replication/strategies/diff.rs @@ -3,11 +3,9 @@ use std::collections::HashMap; use log::trace; -use p2panda_rs::{ - entry::{LogId, SeqNum}, - identity::PublicKey, - Human, -}; +use p2panda_rs::entry::{LogId, SeqNum}; +use p2panda_rs::identity::PublicKey; +use p2panda_rs::Human; use crate::replication::LogHeight; From dbb6546317bbf8346ca8501583ff4294d9323818 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 5 Jun 2023 14:16:59 +0100 Subject: [PATCH 096/126] Remove no longer relevant log message --- 
aquadoggo/src/replication/service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 331e9f507..344f8a7de 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -270,7 +270,6 @@ impl ConnectionManager { if active_sessions.len() < MAX_SESSIONS_PER_PEER && !has_active_target_set_session { Some(peer_id) } else { - debug!("Max sessions reached for peer: {peer_id}"); None } }) From 445d1aa3a0d171dd3a748426c23ece927ded2bb3 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 8 Jun 2023 09:39:16 +0100 Subject: [PATCH 097/126] Stop dialing peer after one address dialed successfully --- aquadoggo/src/network/service.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 0e6c9117c..904098708 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -255,8 +255,11 @@ impl EventLoop { for (peer_id, multiaddr) in list { debug!("mDNS discovered a new peer: {peer_id}"); - if let Err(err) = self.swarm.dial(multiaddr) { + if let Err(err) = self.swarm.dial(peer_id) { warn!("Failed to dial: {}", err); + } else { + debug!("Dial success: skip remaining addresses for: {peer_id}"); + break } // // Only dial the newly discovered peer if we're not already connected. From 2b904f5306b8b1f32b8324a20cdb6276e433aa24 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 8 Jun 2023 10:22:59 +0100 Subject: [PATCH 098/126] Only accept one inbound and one outbound connection per peer --- .../src/network/replication/behaviour.rs | 162 +++++++++++++++--- aquadoggo/src/replication/errors.rs | 10 ++ 2 files changed, 151 insertions(+), 21 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index ab1807f8d..20dbe6cd4 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -1,19 +1,21 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::collections::VecDeque; +use std::collections::{HashMap, VecDeque}; use std::task::{Context, Poll}; use libp2p::core::Endpoint; use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ - ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionClosed, ConnectionDenied, ConnectionId, DialFailure, FromSwarm, ListenFailure, + ListenerClosed, ListenerError, NetworkBehaviour, NotifyHandler, PollParameters, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; -use log::trace; +use log::{debug, trace, warn}; use p2panda_rs::Human; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; +use crate::replication::errors::ConnectionError; use crate::replication::SyncMessage; #[derive(Debug)] @@ -31,15 +33,35 @@ pub enum Event { #[derive(Debug)] pub struct Behaviour { events: VecDeque>, + inbound_connections: HashMap, + outbound_connections: HashMap, } impl Behaviour { pub fn new() -> Self { Self { events: VecDeque::new(), + inbound_connections: HashMap::new(), + outbound_connections: HashMap::new(), } } + fn set_inbound_connection(&mut self, peer_id: PeerId, connection_id: ConnectionId) -> bool { + if self.inbound_connections.get(&peer_id).is_some() { + return false; + } + self.inbound_connections.insert(peer_id, 
connection_id); + true + } + + fn set_outbound_connection(&mut self, peer_id: PeerId, connection_id: ConnectionId) -> bool { + if self.outbound_connections.get(&peer_id).is_some() { + return false; + } + self.outbound_connections.insert(peer_id, connection_id); + true + } + fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { trace!( "Notify swarm of received sync message: {peer_id} {}", @@ -80,34 +102,72 @@ impl NetworkBehaviour for Behaviour { fn handle_established_inbound_connection( &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: &Multiaddr, + connection_id: ConnectionId, + peer_id: PeerId, + _local_addr: &Multiaddr, + remote_address: &Multiaddr, ) -> Result, ConnectionDenied> { + // We only want max one inbound connection per peer, so reject this connection if we + // already have one assigned. + if self.inbound_connections.get(&peer_id).is_some() { + debug!("Connection denied: inbound connection already exists for: {peer_id}"); + return Err(ConnectionDenied::new( + ConnectionError::MultipleInboundConnections(peer_id.to_owned()), + )); + } + debug!( + "New connection: established inbound connection with peer: {peer_id} {remote_address}" + ); + self.set_inbound_connection(peer_id, connection_id); Ok(Handler::new()) } fn handle_established_outbound_connection( &mut self, - _: ConnectionId, - _: PeerId, + connection_id: ConnectionId, + peer_id: PeerId, _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { + // We only want max one outbound connection per peer, so reject this connection if we + // already have one assigned. + if self.outbound_connections.get(&peer_id).is_some() { + debug!("Connection denied: outbound connection already exists for: {peer_id}"); + return Err(ConnectionDenied::new( + ConnectionError::MultipleOutboundConnections(peer_id), + )); + } + debug!("New connection: established outbound connection with peer: {peer_id}"); + self.set_outbound_connection(peer_id, connection_id); Ok(Handler::new()) } fn on_connection_handler_event( &mut self, - peer: PeerId, - _connection_id: ConnectionId, + peer_id: PeerId, + connection_id: ConnectionId, handler_event: THandlerOutEvent, ) { - match handler_event { - HandlerOutEvent::Message(message) => { - self.handle_received_message(&peer, message); + // We only want to process messages which arrive for connections we have assigned to this peer. + let mut current_inbound = false; + let mut current_outbound = false; + + if let Some(inbound_connection_id) = self.inbound_connections.get(&peer_id) { + current_inbound = *inbound_connection_id == connection_id; + } + + if let Some(outbound_connection_id) = self.outbound_connections.get(&peer_id) { + current_outbound = *outbound_connection_id == connection_id; + } + + if current_inbound || current_outbound { + match handler_event { + HandlerOutEvent::Message(message) => { + self.handle_received_message(&peer_id, message); + } } + } else { + debug!("Message ignored: message arrived on an unknown connection for: {peer_id}"); } } @@ -115,22 +175,82 @@ impl NetworkBehaviour for Behaviour { match event { FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, - remaining_established, + connection_id, .. 
}) => { - if remaining_established == 0 { - self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer_id))); + let inbound = self.inbound_connections.get(&peer_id); + let outbound = self.outbound_connections.get(&peer_id); + + match (inbound, outbound) { + // An inbound and outbound connection exists for this peer + (Some(inbound_connection_id), Some(outbound_connection_id)) => { + if *outbound_connection_id == connection_id { + debug!( + "Remove connections: remove outbound connection with peer: {peer_id}" + ); + self.outbound_connections.remove(&peer_id); + } + + if *inbound_connection_id == connection_id { + debug!( + "Remove connections: remove inbound connection with peer: {peer_id}" + ); + self.inbound_connections.remove(&peer_id); + } + } + // Only an outbound connection exists + (None, Some(outbound_connection_id)) => { + debug!( + "Remove connections: remove outbound connection with peer: {peer_id}" + ); + if *outbound_connection_id == connection_id { + self.outbound_connections.remove(&peer_id); + self.events + .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected( + peer_id, + ))); + } + } + // Only an inbound connection exists, + (Some(inbound_connection_id), None) => { + debug!( + "Remove connections: remove inbound connection with peer: {peer_id}" + ); + if *inbound_connection_id == connection_id { + self.inbound_connections.remove(&peer_id); + self.events + .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected( + peer_id, + ))); + } + } + (None, None) => { + warn!("Attempted to disconnect a peer with no known connections"); + } } } FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, - other_established, + connection_id, .. }) => { - if other_established == 0 { + // We only want to issue PeerConnected messages for connections we have accepted. 
+ let mut current_inbound = false; + let mut current_outbound = false; + + if let Some(inbound_connection_id) = self.inbound_connections.get(&peer_id) { + current_inbound = *inbound_connection_id == connection_id; + } + + if let Some(outbound_connection_id) = self.outbound_connections.get(&peer_id) { + current_outbound = *outbound_connection_id == connection_id; + } + + if current_inbound || current_outbound { self.events .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer_id))); + } else { + warn!("Unknown connection: ignoring unknown connection with: {peer_id}"); } } FromSwarm::AddressChange(_) diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index c55ae4e1c..8d4c71468 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -1,9 +1,19 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use libp2p::PeerId; use thiserror::Error; use crate::replication::TargetSet; +#[derive(Error, Debug)] +pub enum ConnectionError { + #[error("Reject duplicate inbound connection with peer: {0}")] + MultipleInboundConnections(PeerId), + + #[error("Reject duplicate outbound connection with peer: {0}")] + MultipleOutboundConnections(PeerId), +} + #[derive(Error, Debug)] pub enum ReplicationError { #[error("Remote peer requested unsupported replication mode")] From 58275db70f7ec904bd5c1d628cb5914e09adb386 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 8 Jun 2023 11:11:17 +0100 Subject: [PATCH 099/126] fmt x clippy --- aquadoggo/src/network/replication/behaviour.rs | 5 ++--- aquadoggo/src/network/service.rs | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index 20dbe6cd4..47ddfcd9b 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -6,9 +6,8 @@ use std::task::{Context, Poll}; use libp2p::core::Endpoint; use libp2p::swarm::derive_prelude::ConnectionEstablished; use libp2p::swarm::{ - ConnectionClosed, ConnectionDenied, ConnectionId, DialFailure, FromSwarm, ListenFailure, - ListenerClosed, ListenerError, NetworkBehaviour, NotifyHandler, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, + PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; use log::{debug, trace, warn}; diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 904098708..80ffda7e2 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -255,11 +255,11 @@ impl EventLoop { for (peer_id, multiaddr) in list { debug!("mDNS discovered a new peer: {peer_id}"); - if let Err(err) = self.swarm.dial(peer_id) { + if let Err(err) = self.swarm.dial(multiaddr) { warn!("Failed to dial: {}", err); } else { debug!("Dial success: skip remaining addresses for: {peer_id}"); - break + break; } // // Only dial the newly discovered peer if we're not already connected. 
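For reference, the connection bookkeeping introduced in the two patches above boils down to a pair of maps with a first-one-wins rule per direction. A minimal sketch, with hypothetical integer ids standing in for libp2p's `PeerId` and `ConnectionId`:

use std::collections::HashMap;

// Hypothetical integer ids standing in for libp2p's PeerId and ConnectionId.
type PeerId = u32;
type ConnectionId = u32;

/// At most one inbound and one outbound connection is accepted per peer; any
/// further connection attempt in the same direction is denied.
#[derive(Default)]
struct ConnectionTable {
    inbound: HashMap<PeerId, ConnectionId>,
    outbound: HashMap<PeerId, ConnectionId>,
}

impl ConnectionTable {
    fn accept_inbound(&mut self, peer: PeerId, connection: ConnectionId) -> bool {
        if self.inbound.contains_key(&peer) {
            return false; // Deny duplicate inbound connection
        }
        self.inbound.insert(peer, connection);
        true
    }

    fn accept_outbound(&mut self, peer: PeerId, connection: ConnectionId) -> bool {
        if self.outbound.contains_key(&peer) {
            return false; // Deny duplicate outbound connection
        }
        self.outbound.insert(peer, connection);
        true
    }
}

fn main() {
    let mut table = ConnectionTable::default();
    assert!(table.accept_inbound(1, 100));
    assert!(!table.accept_inbound(1, 101)); // Second inbound connection is rejected
    assert!(table.accept_outbound(1, 102)); // ...but one outbound is still accepted
}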
From 759aaee09c14f499e588012454bee3953724115e Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Fri, 9 Jun 2023 14:11:04 +0100 Subject: [PATCH 100/126] Use libp2p from git main --- Cargo.lock | 1434 +++-------------- aquadoggo/Cargo.toml | 8 +- aquadoggo/src/network/identity.rs | 10 +- .../src/network/replication/behaviour.rs | 19 +- aquadoggo/src/network/replication/handler.rs | 37 +- aquadoggo/src/network/replication/protocol.rs | 6 +- aquadoggo/src/network/service.rs | 23 +- aquadoggo/src/network/transport.rs | 8 +- aquadoggo_cli/Cargo.toml | 10 +- aquadoggo_cli/src/main.rs | 63 +- 10 files changed, 339 insertions(+), 1279 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec634c091..eff1259c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,15 +12,6 @@ dependencies = [ "regex", ] -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array", -] - [[package]] name = "aead" version = "0.4.3" @@ -28,28 +19,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ "generic-array", - "rand_core 0.6.4", -] - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher 0.2.5", ] [[package]] @@ -59,70 +28,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", "opaque-debug", ] -[[package]] -name = "aes" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" -dependencies = [ - "cfg-if", - "cipher 0.4.4", - "cpufeatures", -] - [[package]] name = "aes-gcm" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.8.0", - "ghash 0.4.4", - "subtle", -] - -[[package]] -name = "aes-gcm" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" -dependencies = [ - "aead 0.5.2", - "aes 0.8.2", - "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - [[package]] name = "ahash" version = "0.7.6" @@ 
-171,7 +95,7 @@ dependencies = [ "asynchronous-codec", "axum", "bamboo-rs-core-ed25519-yasmf", - "bs58", + "bs58 0.4.0", "ciborium", "ctor", "deadqueue", @@ -220,12 +144,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "arc-swap" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" - [[package]] name = "arrayref" version = "0.3.7" @@ -250,29 +168,13 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71938f30533e4d95a6d17aa530939da3842c2ab6f4f84b9dae68447e4129f74a" -[[package]] -name = "asn1-rs" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" -dependencies = [ - "asn1-rs-derive 0.1.0", - "asn1-rs-impl", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "asn1-rs" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive 0.4.0", + "asn1-rs-derive", "asn1-rs-impl", "displaydoc", "nom", @@ -282,18 +184,6 @@ dependencies = [ "time", ] -[[package]] -name = "asn1-rs-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "asn1-rs-derive" version = "0.4.0" @@ -475,7 +365,7 @@ dependencies = [ "polling", "rustix", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", ] @@ -563,7 +453,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -580,7 +470,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -638,7 +528,7 @@ checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", - "base64 0.21.0", + "base64 0.21.2", "bitflags", "bytes", "futures-util", @@ -709,12 +599,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base64" version = "0.13.1" @@ -723,15 +607,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" - -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "bimap" @@ -739,15 +617,6 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -806,22 +675,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ - "block-padding", - "cipher 0.2.5", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "blocking" version = "1.3.1" @@ -843,6 +696,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +[[package]] +name = "bs58" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "tinyvec", +] + [[package]] name = "bumpalo" version = "3.12.2" @@ -870,17 +732,6 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" -[[package]] -name = "ccm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" -dependencies = [ - "aead 0.3.2", - "cipher 0.2.5", - "subtle", -] - [[package]] name = "cfg-if" version = "1.0.0" @@ -894,7 +745,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", "zeroize", ] @@ -905,9 +756,9 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ - "aead 0.4.3", + "aead", "chacha20", - "cipher 0.3.0", + "cipher", "poly1305", "zeroize", ] @@ -939,15 +790,6 @@ dependencies = [ "half", ] -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - [[package]] name = "cipher" version = "0.3.0" @@ -957,16 +799,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - [[package]] name = "clap" version = "4.1.8" @@ -1023,12 +855,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "const-oid" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" - [[package]] name = "constant_time_eq" version = "0.1.5" @@ -1120,7 +946,7 @@ dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset 0.8.0", + "memoffset", "scopeguard", ] @@ -1149,18 +975,6 @@ version = 
"0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-common" version = "0.1.6" @@ -1168,20 +982,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core 0.6.4", "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "ctor" version = "0.1.26" @@ -1198,16 +1001,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher 0.3.0", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1274,9 +1068,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" @@ -1308,38 +1102,13 @@ dependencies = [ "tokio", ] -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "pem-rfc7468", - "zeroize", -] - -[[package]] -name = "der-parser" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" -dependencies = [ - "asn1-rs 0.3.1", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "der-parser" version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -1347,37 +1116,6 @@ dependencies = [ "rusticata-macros", ] -[[package]] -name = "derive_builder" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder_macro" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" -dependencies = [ - "derive_builder_core", - "syn 1.0.109", -] - [[package]] name = "digest" version = "0.9.0" @@ -1435,7 +1173,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -1482,18 +1220,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der", - "elliptic-curve", - "rfc6979", - "signature", -] - [[package]] name = "ed25519" version = "1.5.3" @@ -1526,28 +1252,6 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct", - "crypto-bigint", - "der", - "digest 0.10.6", - "ff", - "generic-array", - "group", - "hkdf", - "pem-rfc7468", - "pkcs8", - "rand_core 0.6.4", - "sec1", - "subtle", - "zeroize", -] - [[package]] name = "encoding_rs" version = "0.8.32" @@ -1636,16 +1340,6 @@ dependencies = [ "instant", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "fiat-crypto" version = "0.1.20" @@ -1762,18 +1456,17 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] name = "futures-rustls" -version = "0.22.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki 0.22.0", + "rustls 0.21.1", ] [[package]] @@ -1788,6 +1481,17 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +[[package]] +name = "futures-ticker" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" +dependencies = [ + "futures", + "futures-timer", + "instant", +] + [[package]] name = "futures-timer" version = "3.0.2" @@ -1853,17 +1557,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval 0.5.3", -] - -[[package]] -name = "ghash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" -dependencies = [ - "opaque-debug", - "polyval 0.6.0", + "polyval", ] [[package]] @@ -1879,19 +1573,8 @@ dependencies = [ ] [[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "h2" -version = "0.3.18" +name = "h2" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ @@ -2034,17 +1717,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" dependencies = [ - "hmac 0.12.1", -] - -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac", - "digest 0.9.0", + "hmac", ] [[package]] @@ -2130,7 +1803,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -2205,15 +1878,6 @@ dependencies = [ "serde", ] -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array", -] - [[package]] name = "instant" version = "0.1.12" @@ -2223,25 +1887,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "interceptor" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" -dependencies = [ - "async-trait", - "bytes", - "log", - "rand 0.8.5", - "rtcp", - "rtp", - "thiserror", - "tokio", - "waitgroup", - "webrtc-srtp", - "webrtc-util", -] - [[package]] name = "io-lifetimes" version = "1.0.10" @@ -2259,7 +1904,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2", + "socket2 0.4.9", "widestring", "winapi", "winreg", @@ -2333,9 +1978,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.144" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libm" @@ -2345,9 +1990,8 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libp2p" -version = "0.51.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f210d259724eae82005b5c48078619b7745edb7b76de370b03f8ba59ea103097" +version = "0.52.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "bytes", "futures", @@ -2367,12 +2011,10 @@ dependencies = [ "libp2p-metrics", "libp2p-noise", "libp2p-ping", - "libp2p-quic", "libp2p-relay", "libp2p-rendezvous", "libp2p-swarm", "libp2p-tcp", - "libp2p-webrtc", "libp2p-yamux", "multiaddr", "pin-project", @@ -2380,9 +2022,8 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510daa05efbc25184458db837f6f9a5143888f1caa742426d92e1833ddd38a50" +version = "0.2.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" 
dependencies = [ "libp2p-core", "libp2p-identity", @@ -2392,9 +2033,8 @@ dependencies = [ [[package]] name = "libp2p-autonat" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ff5fc529665c9abf4e642fb28c0efd83536f6216cc3abf28e37a011a2d6dc5" +version = "0.11.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "async-trait", "futures", @@ -2411,9 +2051,8 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa33f1d26ed664c4fe2cca81a08c8e07d4c1c04f2f4ac7655c2dd85467fda0" +version = "0.2.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "libp2p-core", "libp2p-identity", @@ -2423,9 +2062,8 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.39.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" +version = "0.40.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "either", "fnv", @@ -2452,12 +2090,12 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146ff7034daae62077c415c2376b8057368042df6ab95f5432ad5e88568b1554" +version = "0.40.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "futures", "libp2p-core", + "libp2p-identity", "log", "parking_lot 0.12.1", "smallvec", @@ -2466,16 +2104,18 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.44.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac213adad69bd9866fe87c37fbf241626715e5cd454fb6df9841aa2b02440ee" +version = "0.45.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "asynchronous-codec", - "base64 0.21.0", + "base64 0.21.2", "byteorder", "bytes", + "either", "fnv", "futures", + "futures-ticker", + "getrandom 0.2.9", "hex_fmt", "instant", "libp2p-core", @@ -2490,16 +2130,14 @@ dependencies = [ "serde", "sha2 0.10.6", "smallvec", - "thiserror", "unsigned-varint", - "wasm-timer", + "void", ] [[package]] name = "libp2p-identify" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5455f472243e63b9c497ff320ded0314254a9eb751799a39c283c6f20b793f3c" +version = "0.43.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "asynchronous-codec", "either", @@ -2519,14 +2157,13 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" +checksum = "93a7b3534b84fe89985d8076246806c845e716d0cdac50925dcf65caf49ab3c0" dependencies = [ - "bs58", + "bs58 0.5.0", "ed25519-dalek", "log", - "multiaddr", "multihash", "quick-protobuf", "rand 0.8.5", @@ -2538,9 +2175,8 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.43.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39d5ef876a2b2323d63c258e63c2f8e36f205fe5a11f0b3095d59635650790ff" +version = "0.44.0" +source = 
"git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -2567,9 +2203,8 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.43.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19983e1f949f979a928f2c603de1cf180cc0dc23e4ac93a62651ccb18341460b" +version = "0.44.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "data-encoding", "futures", @@ -2580,7 +2215,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.3", "tokio", "trust-dns-proto", "void", @@ -2588,23 +2223,24 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42ec91e227d7d0dafa4ce88b333cdf5f277253873ab087555c92798db2ddd46" +version = "0.13.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ + "instant", "libp2p-core", "libp2p-identify", + "libp2p-identity", "libp2p-ping", "libp2p-relay", "libp2p-swarm", + "once_cell", "prometheus-client", ] [[package]] name = "libp2p-noise" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3673da89d29936bc6435bafc638e2f184180d554ce844db65915113f86ec5e" +version = "0.43.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "bytes", "curve25519-dalek 3.2.0", @@ -2619,21 +2255,21 @@ dependencies = [ "snow", "static_assertions 1.1.0", "thiserror", - "x25519-dalek 1.1.1", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e57759c19c28a73ef1eb3585ca410cefb72c1a709fcf6de1612a378e4219202" +version = "0.43.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "either", "futures", "futures-timer", "instant", "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", "rand 0.8.5", @@ -2642,9 +2278,8 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.39.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff582c0b74ffda004b716b97bc9cfe7f39960204877758b876dde86093beaa8" +version = "0.40.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "asynchronous-codec", "bytes", @@ -2654,14 +2289,12 @@ dependencies = [ "log", "quick-protobuf", "unsigned-varint", - "void", ] [[package]] name = "libp2p-quic" -version = "0.7.0-alpha.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b26abd81cd2398382a1edfe739b539775be8a90fa6914f39b2ab49571ec735" +version = "0.8.0-alpha" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "bytes", "futures", @@ -2674,16 +2307,15 @@ dependencies = [ "parking_lot 0.12.1", "quinn-proto", "rand 0.8.5", - "rustls 0.20.8", + "rustls 0.21.1", "thiserror", "tokio", ] [[package]] name = "libp2p-relay" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23f34cef39bbc4d020a1e538e2af2bdd707143569de87e7ce6f1500373db0b41" +version = "0.16.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "asynchronous-codec", "bytes", @@ 
-2705,9 +2337,8 @@ dependencies = [ [[package]] name = "libp2p-rendezvous" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633f2dc23d63ad04955642f3025e740a943da4deb79b252b5fcf882208164467" +version = "0.13.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "asynchronous-codec", "bimap", @@ -2727,9 +2358,8 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffdb374267d42dc5ed5bc53f6e601d4a64ac5964779c6e40bb9e4f14c1e30d5" +version = "0.25.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "async-trait", "futures", @@ -2737,15 +2367,16 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", + "log", "rand 0.8.5", "smallvec", + "void", ] [[package]] name = "libp2p-swarm" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd1e223f02fcd7e3790f9b954e2e81791810e3512daeb27fa97df7652e946bc2" +version = "0.43.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "async-std", "either", @@ -2757,6 +2388,8 @@ dependencies = [ "libp2p-identity", "libp2p-swarm-derive", "log", + "multistream-select", + "once_cell", "rand 0.8.5", "smallvec", "tokio", @@ -2765,20 +2398,20 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fba456131824ab6acd4c7bf61e9c0f0a3014b5fc9868ccb8e10d344594cdc4f" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "heck", + "proc-macro-warning", + "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] name = "libp2p-swarm-test" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f6cb31f26505d134ce7106f419de02a8d9640ea56d92f751138933fbec8184d" +version = "0.2.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "async-trait", "futures", @@ -2795,9 +2428,8 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d33698596d7722d85d3ab0c86c2c322254fce1241e91208e3679b4eb3026cf" +version = "0.40.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "async-io", "futures", @@ -2805,66 +2437,34 @@ dependencies = [ "if-watch", "libc", "libp2p-core", + "libp2p-identity", "log", - "socket2", + "socket2 0.5.3", "tokio", ] [[package]] name = "libp2p-tls" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" +version = "0.2.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "futures", "futures-rustls", "libp2p-core", "libp2p-identity", - "rcgen 0.10.0", + "rcgen", "ring", - "rustls 0.20.8", + "rustls 0.21.1", "thiserror", - "webpki 0.22.0", - "x509-parser 0.14.0", + "webpki", + "x509-parser", "yasna", ] -[[package]] -name = "libp2p-webrtc" -version = "0.4.0-alpha.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dba48592edbc2f60b4bc7c10d65445b0c3964c07df26fdf493b6880d33be36f8" -dependencies = [ - "async-trait", - "asynchronous-codec", - "bytes", - "futures", - "futures-timer", - "hex", - "if-watch", - "libp2p-core", - "libp2p-identity", - "libp2p-noise", - "log", - "multihash", - "quick-protobuf", - "quick-protobuf-codec", - "rand 0.8.5", - "rcgen 0.9.3", - "serde", - "stun", - "thiserror", - "tinytemplate", - "tokio", - "tokio-util", - "webrtc", -] - [[package]] name = "libp2p-yamux" -version = "0.43.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd21d950662700a385d4c6d68e2f5f54d778e97068cdd718522222ef513bda" +version = "0.44.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "futures", "libp2p-core", @@ -2920,11 +2520,10 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" dependencies = [ - "cfg-if", "value-bag", ] @@ -2979,15 +2578,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.8.0" @@ -3053,14 +2643,14 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.17.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" +checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" dependencies = [ "arrayref", "byteorder", "data-encoding", - "log", + "libp2p-identity", "multibase", "multihash", "percent-encoding", @@ -3083,38 +2673,19 @@ dependencies = [ [[package]] name = "multihash" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +checksum = "2fd59dcc2bbe70baabeac52cd22ae52c55eefe6c38ff11a9439f16a350a939f2" dependencies = [ "core2", - "digest 0.10.6", - "multihash-derive", "serde", - "serde-big-array", - "sha2 0.10.6", "unsigned-varint", ] -[[package]] -name = "multihash-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "multistream-select" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" +version = "0.13.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "bytes", "futures", @@ -3200,7 +2771,6 @@ dependencies = [ "bitflags", "cfg-if", "libc", - "memoffset 0.6.5", ] [[package]] @@ -3259,29 +2829,20 @@ dependencies = [ "libc", ] -[[package]] -name = "oid-registry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" -dependencies = [ - "asn1-rs 0.3.1", -] - [[package]] name = "oid-registry" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -3301,17 +2862,6 @@ version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" -[[package]] -name = "p256" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" -dependencies = [ - "ecdsa", - "elliptic-curve", - "sha2 0.10.6", -] - [[package]] name = "p2panda-rs" version = "0.7.0" @@ -3341,17 +2891,6 @@ dependencies = [ "yasmf-hash", ] -[[package]] -name = "p384" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" -dependencies = [ - "ecdsa", - "elliptic-curve", - "sha2 0.10.6", -] - [[package]] name = "packed_simd_2" version = "0.3.8" @@ -3431,15 +2970,6 @@ dependencies = [ "base64 0.13.1", ] -[[package]] -name = "pem-rfc7468" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" -dependencies = [ - "base64ct", -] - [[package]] name = "percent-encoding" version = "2.2.0" @@ -3476,7 +3006,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -3492,22 +3022,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -3522,16 +3052,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der", - "spki", -] - [[package]] name = "pkg-config" version = "0.3.27" @@ -3568,7 +3088,7 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash 0.4.1", + "universal-hash", ] [[package]] @@ -3580,19 +3100,7 @@ dependencies = [ "cfg-if", "cpufeatures", 
"opaque-debug", - "universal-hash 0.4.1", -] - -[[package]] -name = "polyval" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash 0.5.0", + "universal-hash", ] [[package]] @@ -3635,20 +3143,31 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-warning" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.18", +] + [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.19.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6fa99d535dd930d1249e6c79cb3c2915f9172a540fe2b02a4c8f9ca954721e" +checksum = "78c2f43e8969d51935d2a7284878ae053ba30034cd563f673cde37ba5205685e" dependencies = [ "dtoa", "itoa", @@ -3684,9 +3203,8 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1693116345026436eb2f10b677806169c1a1260c1c60eaaffe3fb5a29ae23d8b" +version = "0.2.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "asynchronous-codec", "bytes", @@ -3697,27 +3215,26 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.3" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" +checksum = "85af4ed6ee5a89f26a26086e9089a6643650544c025158449a3626ebf72884b3" dependencies = [ "bytes", "rand 0.8.5", "ring", "rustc-hash", - "rustls 0.20.8", + "rustls 0.21.1", "slab", "thiserror", "tinyvec", "tracing", - "webpki 0.22.0", ] [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -3815,19 +3332,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "rcgen" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" -dependencies = [ - "pem", - "ring", - "time", - "x509-parser 0.13.2", - "yasna", -] - [[package]] name = "rcgen" version = "0.10.0" @@ -3871,9 +3375,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.1" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick", "memchr", @@ -3882,9 +3386,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "reqwest" @@ -3892,7 +3396,7 @@ version = "0.11.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13293b639a097af28fc8a90f22add145a9c954e49d77da06263d58cf44d5fb91" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -3932,17 +3436,6 @@ dependencies = [ "quick-error", ] -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint", - "hmac 0.12.1", - "zeroize", -] - [[package]] name = "ring" version = "0.16.20" @@ -4032,17 +3525,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "rtcp" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" -dependencies = [ - "bytes", - "thiserror", - "webrtc-util", -] - [[package]] name = "rtnetlink" version = "0.10.1" @@ -4059,20 +3541,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "rtp" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" -dependencies = [ - "async-trait", - "bytes", - "rand 0.8.5", - "serde", - "thiserror", - "webrtc-util", -] - [[package]] name = "rustc-hash" version = "1.1.0" @@ -4113,27 +3581,26 @@ dependencies = [ [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ - "base64 0.13.1", "log", "ring", - "sct 0.6.1", - "webpki 0.21.4", + "sct", + "webpki", ] [[package]] name = "rustls" -version = "0.20.8" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", + "rustls-webpki", + "sct", ] [[package]] @@ -4142,20 +3609,29 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] -name = "rustversion" -version = "1.0.12" +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rw-stream-sink" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +version = "0.4.0" +source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" dependencies = [ "futures", "pin-project", @@ -4174,16 +3650,6 @@ version = 
"1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -4194,32 +3660,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "sdp" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" -dependencies = [ - "rand 0.8.5", - "substring", - "thiserror", - "url", -] - -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct", - "der", - "generic-array", - "pkcs8", - "subtle", - "zeroize", -] - [[package]] name = "semver" version = "1.0.17" @@ -4235,15 +3675,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-big-array" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4" -dependencies = [ - "serde", -] - [[package]] name = "serde-wasm-bindgen" version = "0.4.5" @@ -4282,7 +3713,7 @@ checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -4317,19 +3748,6 @@ dependencies = [ "serde", ] -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha1" version = "0.10.5" @@ -4389,10 +3807,6 @@ name = "signature" version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.6", - "rand_core 0.6.4", -] [[package]] name = "slab" @@ -4453,7 +3867,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" dependencies = [ - "aes-gcm 0.9.4", + "aes-gcm", "blake2", "chacha20poly1305", "curve25519-dalek 4.0.0-rc.1", @@ -4474,6 +3888,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "spin" version = "0.5.2" @@ -4489,16 +3913,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "sqlformat" version = "0.2.1" @@ -4547,7 +3961,7 @@ dependencies = [ "hashlink", "hex", "hkdf", - "hmac 0.12.1", + "hmac", "indexmap", "itoa", "libc", @@ -4634,34 +4048,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" -[[package]] -name = "stun" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" -dependencies = [ - "base64 0.13.1", - "crc", - "lazy_static", - "md-5", - "rand 0.8.5", - "ring", - "subtle", - "thiserror", - "tokio", - "url", - "webrtc-util", -] - -[[package]] -name = "substring" -version = "1.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ee6433ecef213b2e72f587ef64a2f5943e7cd16fbd82dbe8bc07486c534c86" -dependencies = [ - "autocfg", -] - [[package]] name = "subtle" version = "2.4.1" @@ -4681,9 +4067,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ "proc-macro2", "quote", @@ -4768,7 +4154,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -4798,16 +4184,6 @@ dependencies = [ "time-core", ] -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "tinyvec" version = "1.6.0" @@ -4825,33 +4201,32 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.18", ] [[package]] @@ -4862,7 +4237,7 @@ checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls 0.20.8", "tokio", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -4980,7 +4355,7 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -5016,7 +4391,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -5069,25 +4444,6 @@ dependencies = [ "utf-8", ] -[[package]] -name = "turn" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" -dependencies = [ - "async-trait", - "base64 0.13.1", - "futures", - "log", - "md-5", - "rand 0.8.5", - "ring", - "stun", - "thiserror", - "tokio", - 
"webrtc-util", -] - [[package]] name = "typenum" version = "1.16.0" @@ -5161,16 +4517,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "universal-hash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "unsigned-varint" version = "0.7.1" @@ -5204,24 +4550,11 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "uuid" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" -dependencies = [ - "getrandom 0.2.9", -] - [[package]] name = "value-bag" -version = "1.0.0-alpha.9" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] +checksum = "a4d330786735ea358f3bc09eea4caa098569c1c93f342d9aca0514915022fe7e" [[package]] name = "varu64" @@ -5256,15 +4589,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "waitgroup" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1f50000a783467e6c0200f9d10642f4bc424e39efc1b770203e88b488f79292" -dependencies = [ - "atomic-waker", -] - [[package]] name = "waker-fn" version = "1.1.0" @@ -5314,7 +4638,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", "wasm-bindgen-shared", ] @@ -5348,7 +4672,7 @@ checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5372,21 +4696,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "wasm-timer" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" -dependencies = [ - "futures", - "js-sys", - "parking_lot 0.11.2", - "pin-utils", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "web-sys" version = "0.3.62" @@ -5397,16 +4706,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -5423,216 +4722,7 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "webpki 0.22.0", -] - -[[package]] -name = "webrtc" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" -dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "hex", - "interceptor", - "lazy_static", - "log", - "rand 0.8.5", - "rcgen 0.9.3", - "regex", - "ring", - "rtcp", - "rtp", - "rustls 0.19.1", - "sdp", - "serde", - "serde_json", - "sha2 0.10.6", - "stun", - "thiserror", - 
"time", - "tokio", - "turn", - "url", - "waitgroup", - "webrtc-data", - "webrtc-dtls", - "webrtc-ice", - "webrtc-mdns", - "webrtc-media", - "webrtc-sctp", - "webrtc-srtp", - "webrtc-util", -] - -[[package]] -name = "webrtc-data" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" -dependencies = [ - "bytes", - "derive_builder", - "log", - "thiserror", - "tokio", - "webrtc-sctp", - "webrtc-util", -] - -[[package]] -name = "webrtc-dtls" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" -dependencies = [ - "aes 0.6.0", - "aes-gcm 0.10.1", - "async-trait", - "bincode", - "block-modes", - "byteorder", - "ccm", - "curve25519-dalek 3.2.0", - "der-parser 8.2.0", - "elliptic-curve", - "hkdf", - "hmac 0.12.1", - "log", - "oid-registry 0.6.1", - "p256", - "p384", - "rand 0.8.5", - "rand_core 0.6.4", - "rcgen 0.9.3", - "ring", - "rustls 0.19.1", - "sec1", - "serde", - "sha1", - "sha2 0.10.6", - "signature", - "subtle", - "thiserror", - "tokio", - "webpki 0.21.4", - "webrtc-util", - "x25519-dalek 2.0.0-pre.1", - "x509-parser 0.13.2", -] - -[[package]] -name = "webrtc-ice" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" -dependencies = [ - "arc-swap", - "async-trait", - "crc", - "log", - "rand 0.8.5", - "serde", - "serde_json", - "stun", - "thiserror", - "tokio", - "turn", - "url", - "uuid", - "waitgroup", - "webrtc-mdns", - "webrtc-util", -] - -[[package]] -name = "webrtc-mdns" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" -dependencies = [ - "log", - "socket2", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-media" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" -dependencies = [ - "byteorder", - "bytes", - "rand 0.8.5", - "rtp", - "thiserror", -] - -[[package]] -name = "webrtc-sctp" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" -dependencies = [ - "arc-swap", - "async-trait", - "bytes", - "crc", - "log", - "rand 0.8.5", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-srtp" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "aes-gcm 0.9.4", - "async-trait", - "byteorder", - "bytes", - "ctr 0.8.0", - "hmac 0.11.0", - "log", - "rtcp", - "rtp", - "sha-1", - "subtle", - "thiserror", - "tokio", - "webrtc-util", -] - -[[package]] -name = "webrtc-util" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" -dependencies = [ - "async-trait", - "bitflags", - "bytes", - "cc", - "ipnet", - "lazy_static", - "libc", - "log", - "nix", - "rand 0.8.5", - "thiserror", - "tokio", - "winapi", + "webpki", ] [[package]] @@ -5695,21 +4785,6 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] -[[package]] -name = 
"windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -5892,49 +4967,18 @@ dependencies = [ "zeroize", ] -[[package]] -name = "x25519-dalek" -version = "2.0.0-pre.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" -dependencies = [ - "curve25519-dalek 3.2.0", - "rand_core 0.6.4", - "zeroize", -] - [[package]] name = "x509-parser" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" -dependencies = [ - "asn1-rs 0.3.1", - "base64 0.13.1", - "data-encoding", - "der-parser 7.0.0", - "lazy_static", - "nom", - "oid-registry 0.4.0", - "ring", - "rusticata-macros", - "thiserror", - "time", -] - -[[package]] -name = "x509-parser" -version = "0.14.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" +checksum = "bab0c2f54ae1d92f4fcb99c0b7ccf0b1e3451cbd395e5f115ccbdbcb18d4f634" dependencies = [ - "asn1-rs 0.5.2", - "base64 0.13.1", + "asn1-rs", "data-encoding", - "der-parser 8.2.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.6.1", + "oid-registry", "rusticata-macros", "thiserror", "time", @@ -5996,5 +5040,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.18", ] diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index 0745042cd..4638775a0 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -33,7 +33,7 @@ envy = "0.4.2" futures = "0.3.23" hex = "0.4.3" http = "0.2.9" -libp2p = { version = "0.51.3", features = [ +libp2p = { git = "https://github.com/libp2p/rust-libp2p", features = [ "autonat", "identify", "macros", @@ -46,10 +46,10 @@ libp2p = { version = "0.51.3", features = [ "tokio", "yamux", ] } -libp2p-quic = { version = "0.7.0-alpha.3", features = ["tokio"] } +libp2p-quic = { git = "https://github.com/libp2p/rust-libp2p", features = ["tokio"] } lipmaa-link = "0.2.2" log = "0.4.17" -once_cell = "1.17.0" +once_cell = "1.18.0" openssl-probe = "0.1.5" p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "ae60bf754a60a1daf9c6862e9ae51ee7501b007f", features = [ "storage-provider", @@ -82,7 +82,7 @@ ctor = "0.1.23" env_logger = "0.9.0" http = "0.2.9" hyper = "0.14.19" -libp2p-swarm-test = "0.1.0" +libp2p-swarm-test = { git = "https://github.com/libp2p/rust-libp2p" } once_cell = "1.17.0" p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "ae60bf754a60a1daf9c6862e9ae51ee7501b007f", features = [ "test-utils", diff --git a/aquadoggo/src/network/identity.rs b/aquadoggo/src/network/identity.rs index a5594deef..04ae2043b 100644 --- a/aquadoggo/src/network/identity.rs +++ b/aquadoggo/src/network/identity.rs @@ -47,12 +47,8 @@ impl Identity for Keypair { /// Encode the private key as a hex string and save it to the given file path. 
     // See: https://github.com/p2panda/aquadoggo/issues/295
-    #[allow(deprecated)]
     fn save(&self, path: &Path) -> Result<()> {
-        let private_key = match self {
-            Keypair::Ed25519(key_pair) => key_pair.secret(),
-        };
-        let encoded_private_key = hex::encode(private_key);
+        let encoded_private_key = hex::encode(self.key_pair().try_into_ed25519()?.secret());
 
         fs::create_dir_all(path.parent().unwrap())?;
         let mut file = File::create(path)?;
@@ -68,7 +64,6 @@ impl Identity for Keypair {
     /// Load a key pair from file at the given path.
     // See: https://github.com/p2panda/aquadoggo/issues/295
-    #[allow(deprecated)]
     fn load(path: &Path) -> Result<Self>
     where
         Self: Sized,
     {
@@ -78,8 +73,7 @@ impl Identity for Keypair {
         file.read_to_string(&mut contents)?;
 
         let private_key_bytes = hex::decode(contents)?;
-        let private_key = ed25519::SecretKey::from_bytes(private_key_bytes)?;
-        let key_pair = Keypair::Ed25519(private_key.into());
+        let key_pair = Keypair::ed25519_from_bytes(private_key_bytes)?;
 
         Ok(key_pair)
     }
diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs
index 47ddfcd9b..f18658cf4 100644
--- a/aquadoggo/src/network/replication/behaviour.rs
+++ b/aquadoggo/src/network/replication/behaviour.rs
@@ -13,7 +13,7 @@ use libp2p::{Multiaddr, PeerId};
 use log::{debug, trace, warn};
 use p2panda_rs::Human;
 
-use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent};
+use crate::network::replication::handler::{Handler, HandlerFromBehaviour, HandlerToBehaviour};
 use crate::replication::errors::ConnectionError;
 use crate::replication::SyncMessage;
 
@@ -31,7 +31,7 @@ pub enum Event {
 
 #[derive(Debug)]
 pub struct Behaviour {
-    events: VecDeque<ToSwarm<Event, HandlerInEvent>>,
+    events: VecDeque<ToSwarm<Event, HandlerFromBehaviour>>,
     inbound_connections: HashMap<PeerId, ConnectionId>,
     outbound_connections: HashMap<PeerId, ConnectionId>,
 }
@@ -79,7 +79,7 @@ impl Behaviour {
         );
 
         self.events.push_back(ToSwarm::NotifyHandler {
             peer_id,
-            event: HandlerInEvent::Message(message),
+            event: HandlerFromBehaviour::Message(message),
             handler: NotifyHandler::Any,
         });
     }
@@ -88,7 +88,7 @@ impl Behaviour {
     pub fn handle_error(&mut self, peer_id: PeerId) {
         self.events.push_back(ToSwarm::NotifyHandler {
             peer_id,
-            event: HandlerInEvent::ReplicationError,
+            event: HandlerFromBehaviour::ReplicationError,
             handler: NotifyHandler::Any,
         });
     }
@@ -97,7 +97,7 @@ impl Behaviour {
 
 impl NetworkBehaviour for Behaviour {
     type ConnectionHandler = Handler;
-    type OutEvent = Event;
+    type ToSwarm = Event;
 
     fn handle_established_inbound_connection(
         &mut self,
@@ -161,7 +161,7 @@ impl NetworkBehaviour for Behaviour {
         if current_inbound || current_outbound {
             match handler_event {
-                HandlerOutEvent::Message(message) => {
+                HandlerToBehaviour::Message(message) => {
                     self.handle_received_message(&peer_id, message);
                 }
             }
@@ -260,8 +260,9 @@ impl NetworkBehaviour for Behaviour {
             | FromSwarm::ExpiredListenAddr(_)
             | FromSwarm::ListenerError(_)
             | FromSwarm::ListenerClosed(_)
-            | FromSwarm::NewExternalAddr(_)
-            | FromSwarm::ExpiredExternalAddr(_) => {}
+            | FromSwarm::NewExternalAddrCandidate(_)
+            | FromSwarm::ExternalAddrConfirmed(_)
+            | FromSwarm::ExternalAddrExpired(_) => {}
         }
     }
 
@@ -269,7 +270,7 @@ impl NetworkBehaviour for Behaviour {
         &mut self,
         _cx: &mut Context<'_>,
         _params: &mut impl PollParameters,
-    ) -> Poll<ToSwarm<Self::OutEvent, THandlerInEvent<Self>>> {
+    ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
         if let Some(event) = self.events.pop_front() {
             return Poll::Ready(event);
         }
diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs
index 70ba09d55..7947ad45e 100644
--- a/aquadoggo/src/network/replication/handler.rs
+++ b/aquadoggo/src/network/replication/handler.rs
@@ -7,11 +7,14 @@ use std::time::{Duration, Instant};
 
 use asynchronous_codec::Framed;
 use futures::{Sink, StreamExt};
-use libp2p::swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound};
+use libp2p::swarm::handler::{
+    ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound, ProtocolsChange,
+};
 use libp2p::swarm::{
-    ConnectionHandler, ConnectionHandlerEvent, KeepAlive, NegotiatedSubstream, SubstreamProtocol,
+    ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream as NegotiatedStream,
+    SubstreamProtocol,
 };
-use log::warn;
+use log::{debug, warn};
 use thiserror::Error;
 
 use crate::network::replication::{Codec, CodecError, Protocol};
@@ -84,7 +87,7 @@ impl Handler {
 
 /// An event sent from the network behaviour to the connection handler.
 #[derive(Debug)]
-pub enum HandlerInEvent {
+pub enum HandlerFromBehaviour {
     /// Replication message to send on outbound stream.
     Message(SyncMessage),
 
@@ -96,7 +99,7 @@ pub enum HandlerInEvent {
 ///
 /// This informs the network behaviour of various events created by the handler.
 #[derive(Debug)]
-pub enum HandlerOutEvent {
+pub enum HandlerToBehaviour {
     /// Replication message received on the inbound stream.
     Message(SyncMessage),
 }
@@ -113,7 +116,7 @@ pub enum HandlerError {
     RemotePeerDisconnected,
 }
 
-type Stream = Framed<NegotiatedSubstream, Codec>;
+type Stream = Framed<NegotiatedStream, Codec>;
 
 /// State of the inbound substream, opened either by us or by the remote.
 enum InboundSubstreamState {
@@ -143,8 +146,8 @@ enum OutboundSubstreamState {
 }
 
 impl ConnectionHandler for Handler {
-    type InEvent = HandlerInEvent;
-    type OutEvent = HandlerOutEvent;
+    type FromBehaviour = HandlerFromBehaviour;
+    type ToBehaviour = HandlerToBehaviour;
     type Error = HandlerError;
     type InboundProtocol = Protocol;
     type OutboundProtocol = Protocol;
@@ -176,15 +179,21 @@ impl ConnectionHandler for Handler {
             | ConnectionEvent::ListenUpgradeError(_) => {
                 warn!("Connection event error");
             }
+            ConnectionEvent::LocalProtocolsChange(_) => {
+                debug!("ConnectionEvent: LocalProtocolsChange")
+            }
+            ConnectionEvent::RemoteProtocolsChange(_) => {
+                debug!("ConnectionEvent: RemoteProtocolsChange")
+            }
         }
     }
 
-    fn on_behaviour_event(&mut self, event: Self::InEvent) {
+    fn on_behaviour_event(&mut self, event: Self::FromBehaviour) {
         match event {
-            HandlerInEvent::Message(message) => {
+            HandlerFromBehaviour::Message(message) => {
                 self.send_queue.push_back(message);
             }
-            HandlerInEvent::ReplicationError => {
+            HandlerFromBehaviour::ReplicationError => {
                 self.critical_error = true;
             }
         }
@@ -208,7 +217,7 @@
         ConnectionHandlerEvent<
             Self::OutboundProtocol,
             Self::OutboundOpenInfo,
-            Self::OutEvent,
+            Self::ToBehaviour,
             Self::Error,
         >,
     > {
@@ -246,8 +255,8 @@
                     self.inbound_substream =
                         Some(InboundSubstreamState::WaitingInput(substream));
 
-                    return Poll::Ready(ConnectionHandlerEvent::Custom(
-                        HandlerOutEvent::Message(message),
+                    return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
+                        HandlerToBehaviour::Message(message),
                     ));
                 }
                 Poll::Ready(Some(Err(err))) => {
diff --git a/aquadoggo/src/network/replication/protocol.rs b/aquadoggo/src/network/replication/protocol.rs
index a73edca2b..ba06e0e55 100644
--- a/aquadoggo/src/network/replication/protocol.rs
+++ b/aquadoggo/src/network/replication/protocol.rs
@@ -9,7 +9,7 @@ use libp2p::{InboundUpgrade, OutboundUpgrade};
 
 use crate::replication::SyncMessage;
 
-pub const PROTOCOL_NAME: &[u8] = b"/p2p/p2panda/1.0.0";
+pub const PROTOCOL_NAME: &str = "/p2p/p2panda/1.0.0";
"/p2p/p2panda/1.0.0"; pub type CodecError = CborCodecError; @@ -25,11 +25,11 @@ impl Protocol { } impl UpgradeInfo for Protocol { - type Info = Vec; + type Info = String; type InfoIter = Vec; fn protocol_info(&self) -> Self::InfoIter { - vec![PROTOCOL_NAME.to_vec()] + vec![PROTOCOL_NAME.to_string()] } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 80ffda7e2..2be19d162 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -3,7 +3,7 @@ use anyhow::Result; use libp2p::multiaddr::Protocol; use libp2p::ping::Event; -use libp2p::swarm::{AddressScore, SwarmEvent}; +use libp2p::swarm::SwarmEvent; use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr, Swarm}; use log::{debug, info, trace, warn}; use tokio::task; @@ -18,6 +18,7 @@ use crate::network::config::NODE_NAMESPACE; use crate::network::replication; use crate::network::swarm; use crate::network::NetworkConfiguration; +use crate::replication::errors::ConnectionError; /// Network service that configures and deploys a network swarm over QUIC transports. /// @@ -181,7 +182,6 @@ impl EventLoop { // ~~~~~ // Swarm // ~~~~~ - SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), SwarmEvent::ConnectionEstablished { peer_id, num_established, @@ -220,10 +220,12 @@ impl EventLoop { address, } => trace!("ExpiredListenAddr: {listener_id:?} {address}"), SwarmEvent::IncomingConnection { + connection_id, local_addr, send_back_addr, } => debug!("IncomingConnection: {local_addr} {send_back_addr}"), SwarmEvent::IncomingConnectionError { + connection_id, local_addr, send_back_addr, error, @@ -242,7 +244,11 @@ impl EventLoop { } => { info!("Listening on {address}"); } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { + SwarmEvent::OutgoingConnectionError { + connection_id, + peer_id, + error, + } => { warn!("OutgoingConnectionError: {peer_id:?} {error:?}"); // self.send_service_message(ServiceMessage::ConnectionError(peer_id)); } @@ -285,7 +291,11 @@ impl EventLoop { // ~~~~ // Ping // ~~~~ - SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { peer, result: _ })) => { + SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { + connection, + peer, + result: _, + })) => { debug!("Ping from: {peer}") } @@ -312,7 +322,7 @@ impl EventLoop { if peer_id != local_peer_id { debug!("Discovered peer {peer_id} at {address}"); - let p2p_suffix = Protocol::P2p(*peer_id.as_ref()); + let p2p_suffix = Protocol::P2p(peer_id); let address_with_p2p = if !address .ends_with(&Multiaddr::empty().with(p2p_suffix.clone())) { @@ -410,8 +420,7 @@ impl EventLoop { if let Some(addr) = self.external_circuit_addr.clone() { trace!("Adding external relayed listen address: {}", addr); - self.swarm - .add_external_address(addr, AddressScore::Finite(1)); + self.swarm.add_external_address(addr); if let Some(rendezvous_peer_id) = self.network_config.rendezvous_peer_id { diff --git a/aquadoggo/src/network/transport.rs b/aquadoggo/src/network/transport.rs index 582d877eb..2087e4665 100644 --- a/aquadoggo/src/network/transport.rs +++ b/aquadoggo/src/network/transport.rs @@ -5,10 +5,8 @@ use libp2p::core::muxing::StreamMuxerBox; use libp2p::core::transport::upgrade::Version; use libp2p::core::transport::{Boxed, OrTransport}; use libp2p::identity::Keypair; -#[allow(deprecated)] -use libp2p::noise::NoiseAuthenticated; -#[allow(deprecated)] -use libp2p::yamux::YamuxConfig; +use libp2p::noise::Config as NoiseConfig; +use libp2p::yamux::Config as YamuxConfig; use libp2p::{relay, PeerId, Transport}; use libp2p_quic 
as quic;
@@ -34,7 +32,7 @@ pub async fn build_transport(
     // Add encryption and multiplexing to the relay transport
     let relay_transport = relay_transport
         .upgrade(Version::V1)
-        .authenticate(NoiseAuthenticated::xx(key_pair).unwrap())
+        .authenticate(NoiseConfig::new(key_pair).unwrap())
         .multiplex(YamuxConfig::default());
 
     // The relay transport only handles listening and dialing on a relayed Multiaddr; it depends
diff --git a/aquadoggo_cli/Cargo.toml b/aquadoggo_cli/Cargo.toml
index 585caafbc..b9a3f3765 100644
--- a/aquadoggo_cli/Cargo.toml
+++ b/aquadoggo_cli/Cargo.toml
@@ -20,11 +20,11 @@ path = "src/main.rs"
 doc = false
 
 [dependencies]
-anyhow = "=1.0.62"
-tokio = { version = "=1.25.0", features = ["full"] }
-env_logger = "=0.9.0"
-clap = { version = "=4.1.8", features = ["derive"] }
-libp2p = "=0.51.3"
+anyhow = "1.0.62"
+tokio = { version = "1.25.0", features = ["full"] }
+env_logger = "0.9.0"
+clap = { version = "4.1.8", features = ["derive"] }
+libp2p = { git = "https://github.com/libp2p/rust-libp2p" }
 
 [dependencies.aquadoggo]
 version = "~0.4.0"
diff --git a/aquadoggo_cli/src/main.rs b/aquadoggo_cli/src/main.rs
index 49780fde4..52197d749 100644
--- a/aquadoggo_cli/src/main.rs
+++ b/aquadoggo_cli/src/main.rs
@@ -2,6 +2,7 @@
 #![allow(clippy::uninlined_format_args)]
 
 use std::convert::{TryFrom, TryInto};
+use std::str::FromStr;
 
 use anyhow::Result;
 use aquadoggo::{Configuration, NetworkConfiguration, Node};
@@ -65,33 +66,34 @@ struct Cli {
 impl Cli {
     // Run custom validators on parsed CLI input
     fn validate(self) -> Self {
-        // Ensure rendezvous server address includes a peer ID
-        if let Some(addr) = &self.rendezvous_address {
-            // Check if the given `Multiaddr` contains a `PeerId`
-            if PeerId::try_from_multiaddr(addr).is_none() {
-                // Print a help message about the missing value(s) and exit
-                Cli::command()
-                    .error(
-                        ClapErrorKind::ValueValidation,
-                        "'--rendezvous-address' must include the peer ID of the server",
-                    )
-                    .exit()
-            }
-        }
-
-        // Ensure relay server address includes a peer ID
-        if let Some(addr) = &self.relay_address {
-            // Check if the given `Multiaddr` contains a `PeerId`
-            if PeerId::try_from_multiaddr(addr).is_none() {
-                // Print a help message about the missing value(s) and exit
-                Cli::command()
-                    .error(
-                        ClapErrorKind::ValueValidation,
-                        "'--relay-address' must include the peer ID of the server",
-                    )
-                    .exit()
-            }
-        }
+        // @TODO: This needs updating for v0.5.2 https://github.com/libp2p/rust-libp2p/pull/3656
+        // // Ensure rendezvous server address includes a peer ID
+        // if let Some(addr) = &self.rendezvous_address {
+        //     // Check if the given `Multiaddr` contains a `PeerId`
+        //     if PeerId::try_from_multiaddr(addr).is_none() {
+        //         // Print a help message about the missing value(s) and exit
+        //         Cli::command()
+        //             .error(
+        //                 ClapErrorKind::ValueValidation,
+        //                 "'--rendezvous-address' must include the peer ID of the server",
+        //             )
+        //             .exit()
+        //     }
+        // }
+        //
+        // // Ensure relay server address includes a peer ID
+        // if let Some(addr) = &self.relay_address {
+        //     // Check if the given `Multiaddr` contains a `PeerId`
+        //     if PeerId::try_from_multiaddr(addr).is_none() {
+        //         // Print a help message about the missing value(s) and exit
+        //         Cli::command()
+        //             .error(
+        //                 ClapErrorKind::ValueValidation,
+        //                 "'--relay-address' must include the peer ID of the server",
+        //             )
+        //             .exit()
+        //     }
+        // }
 
         self
     }
@@ -104,13 +106,16 @@ impl TryFrom<Cli> for Configuration {
         let mut config = Configuration::new(cli.data_dir)?;
 
         let relay_peer_id = if let Some(addr) =
&cli.relay_address {
-            PeerId::try_from_multiaddr(addr)
+            // @TODO: This needs updating for v0.5.2 https://github.com/libp2p/rust-libp2p/pull/3656
+
+            Some(PeerId::from_str(&addr.into_iter().last().unwrap().to_string()).unwrap())
         } else {
             None
         };
 
         let rendezvous_peer_id = if let Some(addr) = &cli.rendezvous_address {
-            PeerId::try_from_multiaddr(addr)
+            // @TODO: This needs updating for v0.5.2 https://github.com/libp2p/rust-libp2p/pull/3656
+            Some(PeerId::from_str(&addr.into_iter().last().unwrap().to_string()).unwrap())
         } else {
             None
         };

From 24d67edb71711858e779ad8a4e58a0932053bba1 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Fri, 9 Jun 2023 14:14:45 +0100
Subject: [PATCH 101/126] Add network info logging on incoming connection
 errors

---
 aquadoggo/src/network/service.rs | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs
index 2be19d162..9d5ae8ef1 100644
--- a/aquadoggo/src/network/service.rs
+++ b/aquadoggo/src/network/service.rs
@@ -229,7 +229,29 @@ impl EventLoop {
                 local_addr,
                 send_back_addr,
                 error,
-            } => warn!("IncomingConnectionError: {local_addr} {send_back_addr} {error:?}"),
+            } => {
+                warn!("IncomingConnectionError: {local_addr} {send_back_addr} {error:?}");
+                debug!("{:#?}", self.swarm.network_info());
+                // We can access the PeerId here and call disconnect_peer_id on the swarm to
+                // clean up any dangling connections, but there shouldn't actually be any... and
+                // when I try this it doesn't have an effect (we still hit pending/established
+                // connection limits).
+                //
+                // match error {
+                //     libp2p::swarm::ListenError::Denied { cause } => {
+                //         let error = cause.downcast::<ConnectionError>();
+                //         if let Ok(error) = error {
+                //             match error {
+                //                 ConnectionError::MultipleInboundConnections(peer_id) => {
+                //                     let _ = self.swarm.disconnect_peer_id(peer_id);
+                //                 }
+                //                 _ => (),
+                //             }
+                //         }
+                //     }
+                //     _ => (),
+                // }
+            }
             SwarmEvent::ListenerClosed {
                 listener_id,
                 addresses,

From e4a3e753d0d64f65f3a5d2307716093c7ba09712 Mon Sep 17 00:00:00 2001
From: adz
Date: Mon, 12 Jun 2023 15:57:32 +0200
Subject: [PATCH 102/126] Revert

---
 Cargo.lock                                    | 1434 ++++++++++++++---
 aquadoggo/Cargo.toml                          |    8 +-
 aquadoggo/src/network/identity.rs             |   10 +-
 .../src/network/replication/behaviour.rs      |  174 +-
 aquadoggo/src/network/replication/handler.rs  |   37 +-
 aquadoggo/src/network/replication/protocol.rs |    6 +-
 aquadoggo/src/network/service.rs              |   51 +-
 aquadoggo/src/network/transport.rs            |    8 +-
 aquadoggo/src/replication/errors.rs           |   10 -
 aquadoggo_cli/Cargo.toml                      |   10 +-
 aquadoggo_cli/src/main.rs                     |   63 +-
 11 files changed, 1300 insertions(+), 511 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index eff1259c4..ec634c091 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -12,6 +12,15 @@ dependencies = [
  "regex",
 ]
 
+[[package]]
+name = "aead"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331"
+dependencies = [
+ "generic-array",
+]
+
 [[package]]
 name = "aead"
 version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877"
 dependencies = [
  "generic-array",
+ "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "aead"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
+dependencies = [
"crypto-common", + "generic-array", +] + +[[package]] +name = "aes" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" +dependencies = [ + "aes-soft", + "aesni", + "cipher 0.2.5", ] [[package]] @@ -28,25 +59,70 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "opaque-debug", ] +[[package]] +name = "aes" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" +dependencies = [ + "cfg-if", + "cipher 0.4.4", + "cpufeatures", +] + [[package]] name = "aes-gcm" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", + "aead 0.4.3", + "aes 0.7.5", + "cipher 0.3.0", + "ctr 0.8.0", + "ghash 0.4.4", + "subtle", +] + +[[package]] +name = "aes-gcm" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +dependencies = [ + "aead 0.5.2", + "aes 0.8.2", + "cipher 0.4.4", + "ctr 0.9.2", + "ghash 0.5.0", "subtle", ] +[[package]] +name = "aes-soft" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" +dependencies = [ + "cipher 0.2.5", + "opaque-debug", +] + +[[package]] +name = "aesni" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" +dependencies = [ + "cipher 0.2.5", + "opaque-debug", +] + [[package]] name = "ahash" version = "0.7.6" @@ -95,7 +171,7 @@ dependencies = [ "asynchronous-codec", "axum", "bamboo-rs-core-ed25519-yasmf", - "bs58 0.4.0", + "bs58", "ciborium", "ctor", "deadqueue", @@ -144,6 +220,12 @@ dependencies = [ "tokio", ] +[[package]] +name = "arc-swap" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" + [[package]] name = "arrayref" version = "0.3.7" @@ -168,13 +250,29 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71938f30533e4d95a6d17aa530939da3842c2ab6f4f84b9dae68447e4129f74a" +[[package]] +name = "asn1-rs" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" +dependencies = [ + "asn1-rs-derive 0.1.0", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + [[package]] name = "asn1-rs" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive", + "asn1-rs-derive 0.4.0", "asn1-rs-impl", "displaydoc", "nom", @@ -184,6 +282,18 @@ dependencies = [ "time", ] +[[package]] +name = "asn1-rs-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + [[package]] name = "asn1-rs-derive" version = "0.4.0" @@ -365,7 +475,7 @@ dependencies = [ "polling", "rustix", "slab", - "socket2 0.4.9", + "socket2", "waker-fn", ] @@ -453,7 +563,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] [[package]] @@ -470,7 +580,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] [[package]] @@ -528,7 +638,7 @@ checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", - "base64 0.21.2", + "base64 0.21.0", "bitflags", "bytes", "futures-util", @@ -599,6 +709,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base64" version = "0.13.1" @@ -607,9 +723,15 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bimap" @@ -617,6 +739,15 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -675,6 +806,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-modes" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" +dependencies = [ + "block-padding", + "cipher 0.2.5", +] + +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "blocking" version = "1.3.1" @@ -696,15 +843,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bs58" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" -dependencies = [ - "tinyvec", -] - [[package]] name = "bumpalo" version = "3.12.2" @@ -732,6 +870,17 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +[[package]] +name = "ccm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" +dependencies = [ + "aead 0.3.2", + "cipher 0.2.5", + "subtle", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -745,7 +894,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "zeroize", ] @@ -756,9 +905,9 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ - "aead", + "aead 0.4.3", "chacha20", - "cipher", + "cipher 0.3.0", "poly1305", "zeroize", ] @@ -790,6 +939,15 @@ dependencies = [ "half", ] +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array", +] + [[package]] name = "cipher" version = "0.3.0" @@ -799,6 +957,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clap" version = "4.1.8" @@ -855,6 +1023,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "const-oid" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -946,7 +1120,7 @@ dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset", + "memoffset 0.8.0", "scopeguard", ] @@ -975,6 +1149,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -982,9 +1168,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] +[[package]] +name = "crypto-mac" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "ctor" version = "0.1.26" @@ -1001,7 +1198,16 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher", + "cipher 0.3.0", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher 0.4.4", ] 
[[package]] @@ -1068,9 +1274,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" [[package]] name = "data-encoding-macro" @@ -1102,13 +1308,38 @@ dependencies = [ "tokio", ] +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "der-parser" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" +dependencies = [ + "asn1-rs 0.3.1", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "der-parser" version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", "displaydoc", "nom", "num-bigint", @@ -1116,6 +1347,37 @@ dependencies = [ "rusticata-macros", ] +[[package]] +name = "derive_builder" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_macro" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" +dependencies = [ + "derive_builder_core", + "syn 1.0.109", +] + [[package]] name = "digest" version = "0.9.0" @@ -1173,7 +1435,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] [[package]] @@ -1220,6 +1482,18 @@ dependencies = [ "thiserror", ] +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der", + "elliptic-curve", + "rfc6979", + "signature", +] + [[package]] name = "ed25519" version = "1.5.3" @@ -1252,6 +1526,28 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct", + "crypto-bigint", + "der", + "digest 0.10.6", + "ff", + "generic-array", + "group", + "hkdf", + "pem-rfc7468", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encoding_rs" version = "0.8.32" @@ -1340,6 +1636,16 @@ dependencies = [ "instant", ] +[[package]] +name = "ff" +version = "0.12.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -1456,17 +1762,18 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] [[package]] name = "futures-rustls" -version = "0.24.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" +checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.21.1", + "rustls 0.20.8", + "webpki 0.22.0", ] [[package]] @@ -1481,17 +1788,6 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" -[[package]] -name = "futures-ticker" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" -dependencies = [ - "futures", - "futures-timer", - "instant", -] - [[package]] name = "futures-timer" version = "3.0.2" @@ -1557,7 +1853,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval", + "polyval 0.5.3", +] + +[[package]] +name = "ghash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +dependencies = [ + "opaque-debug", + "polyval 0.6.0", ] [[package]] @@ -1573,8 +1879,19 @@ dependencies = [ ] [[package]] -name = "h2" -version = "0.3.18" +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "h2" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ @@ -1717,7 +2034,17 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" dependencies = [ - "hmac", + "hmac 0.12.1", +] + +[[package]] +name = "hmac" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +dependencies = [ + "crypto-mac", + "digest 0.9.0", ] [[package]] @@ -1803,7 +2130,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2", "tokio", "tower-service", "tracing", @@ -1878,6 +2205,15 @@ dependencies = [ "serde", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "instant" version = "0.1.12" @@ -1887,6 +2223,25 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "interceptor" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" +dependencies = [ + "async-trait", + "bytes", + "log", + "rand 0.8.5", + "rtcp", + "rtp", + "thiserror", + "tokio", + "waitgroup", + "webrtc-srtp", + "webrtc-util", +] + [[package]] name = "io-lifetimes" version = "1.0.10" @@ -1904,7 +2259,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2 0.4.9", + "socket2", "widestring", "winapi", "winreg", @@ -1978,9 +2333,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.146" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libm" @@ -1990,8 +2345,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libp2p" -version = "0.52.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.51.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f210d259724eae82005b5c48078619b7745edb7b76de370b03f8ba59ea103097" dependencies = [ "bytes", "futures", @@ -2011,10 +2367,12 @@ dependencies = [ "libp2p-metrics", "libp2p-noise", "libp2p-ping", + "libp2p-quic", "libp2p-relay", "libp2p-rendezvous", "libp2p-swarm", "libp2p-tcp", + "libp2p-webrtc", "libp2p-yamux", "multiaddr", "pin-project", @@ -2022,8 +2380,9 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" -version = "0.2.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "510daa05efbc25184458db837f6f9a5143888f1caa742426d92e1833ddd38a50" dependencies = [ "libp2p-core", "libp2p-identity", @@ -2033,8 +2392,9 @@ dependencies = [ [[package]] name = "libp2p-autonat" -version = "0.11.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ff5fc529665c9abf4e642fb28c0efd83536f6216cc3abf28e37a011a2d6dc5" dependencies = [ "async-trait", "futures", @@ -2051,8 +2411,9 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.2.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4caa33f1d26ed664c4fe2cca81a08c8e07d4c1c04f2f4ac7655c2dd85467fda0" dependencies = [ "libp2p-core", "libp2p-identity", @@ -2062,8 +2423,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.39.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" dependencies = [ "either", "fnv", @@ -2090,12 +2452,12 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.40.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"146ff7034daae62077c415c2376b8057368042df6ab95f5432ad5e88568b1554" dependencies = [ "futures", "libp2p-core", - "libp2p-identity", "log", "parking_lot 0.12.1", "smallvec", @@ -2104,18 +2466,16 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.45.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.44.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac213adad69bd9866fe87c37fbf241626715e5cd454fb6df9841aa2b02440ee" dependencies = [ "asynchronous-codec", - "base64 0.21.2", + "base64 0.21.0", "byteorder", "bytes", - "either", "fnv", "futures", - "futures-ticker", - "getrandom 0.2.9", "hex_fmt", "instant", "libp2p-core", @@ -2130,14 +2490,16 @@ dependencies = [ "serde", "sha2 0.10.6", "smallvec", + "thiserror", "unsigned-varint", - "void", + "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.43.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5455f472243e63b9c497ff320ded0314254a9eb751799a39c283c6f20b793f3c" dependencies = [ "asynchronous-codec", "either", @@ -2157,13 +2519,14 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93a7b3534b84fe89985d8076246806c845e716d0cdac50925dcf65caf49ab3c0" +checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" dependencies = [ - "bs58 0.5.0", + "bs58", "ed25519-dalek", "log", + "multiaddr", "multihash", "quick-protobuf", "rand 0.8.5", @@ -2175,8 +2538,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.44.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.43.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39d5ef876a2b2323d63c258e63c2f8e36f205fe5a11f0b3095d59635650790ff" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -2203,8 +2567,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.44.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.43.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19983e1f949f979a928f2c603de1cf180cc0dc23e4ac93a62651ccb18341460b" dependencies = [ "data-encoding", "futures", @@ -2215,7 +2580,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2 0.5.3", + "socket2", "tokio", "trust-dns-proto", "void", @@ -2223,24 +2588,23 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.13.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a42ec91e227d7d0dafa4ce88b333cdf5f277253873ab087555c92798db2ddd46" dependencies = [ - "instant", "libp2p-core", "libp2p-identify", - "libp2p-identity", "libp2p-ping", "libp2p-relay", "libp2p-swarm", - "once_cell", "prometheus-client", ] [[package]] name = "libp2p-noise" -version = "0.43.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3673da89d29936bc6435bafc638e2f184180d554ce844db65915113f86ec5e" dependencies = [ "bytes", "curve25519-dalek 3.2.0", @@ 
-2255,21 +2619,21 @@ dependencies = [ "snow", "static_assertions 1.1.0", "thiserror", - "x25519-dalek", + "x25519-dalek 1.1.1", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.43.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e57759c19c28a73ef1eb3585ca410cefb72c1a709fcf6de1612a378e4219202" dependencies = [ "either", "futures", "futures-timer", "instant", "libp2p-core", - "libp2p-identity", "libp2p-swarm", "log", "rand 0.8.5", @@ -2278,8 +2642,9 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.40.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.39.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ff582c0b74ffda004b716b97bc9cfe7f39960204877758b876dde86093beaa8" dependencies = [ "asynchronous-codec", "bytes", @@ -2289,12 +2654,14 @@ dependencies = [ "log", "quick-protobuf", "unsigned-varint", + "void", ] [[package]] name = "libp2p-quic" -version = "0.8.0-alpha" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6b26abd81cd2398382a1edfe739b539775be8a90fa6914f39b2ab49571ec735" dependencies = [ "bytes", "futures", @@ -2307,15 +2674,16 @@ dependencies = [ "parking_lot 0.12.1", "quinn-proto", "rand 0.8.5", - "rustls 0.21.1", + "rustls 0.20.8", "thiserror", "tokio", ] [[package]] name = "libp2p-relay" -version = "0.16.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23f34cef39bbc4d020a1e538e2af2bdd707143569de87e7ce6f1500373db0b41" dependencies = [ "asynchronous-codec", "bytes", @@ -2337,8 +2705,9 @@ dependencies = [ [[package]] name = "libp2p-rendezvous" -version = "0.13.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633f2dc23d63ad04955642f3025e740a943da4deb79b252b5fcf882208164467" dependencies = [ "asynchronous-codec", "bimap", @@ -2358,8 +2727,9 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.25.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffdb374267d42dc5ed5bc53f6e601d4a64ac5964779c6e40bb9e4f14c1e30d5" dependencies = [ "async-trait", "futures", @@ -2367,16 +2737,15 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "rand 0.8.5", "smallvec", - "void", ] [[package]] name = "libp2p-swarm" -version = "0.43.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd1e223f02fcd7e3790f9b954e2e81791810e3512daeb27fa97df7652e946bc2" dependencies = [ "async-std", "either", @@ -2388,8 +2757,6 @@ dependencies = [ "libp2p-identity", "libp2p-swarm-derive", "log", - "multistream-select", - "once_cell", "rand 0.8.5", "smallvec", "tokio", @@ -2398,20 +2765,20 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.33.0" -source = 
"git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fba456131824ab6acd4c7bf61e9c0f0a3014b5fc9868ccb8e10d344594cdc4f" dependencies = [ "heck", - "proc-macro-warning", - "proc-macro2", "quote", - "syn 2.0.18", + "syn 1.0.109", ] [[package]] name = "libp2p-swarm-test" -version = "0.2.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f6cb31f26505d134ce7106f419de02a8d9640ea56d92f751138933fbec8184d" dependencies = [ "async-trait", "futures", @@ -2428,8 +2795,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.40.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d33698596d7722d85d3ab0c86c2c322254fce1241e91208e3679b4eb3026cf" dependencies = [ "async-io", "futures", @@ -2437,34 +2805,66 @@ dependencies = [ "if-watch", "libc", "libp2p-core", - "libp2p-identity", "log", - "socket2 0.5.3", + "socket2", "tokio", ] [[package]] name = "libp2p-tls" -version = "0.2.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" dependencies = [ "futures", "futures-rustls", "libp2p-core", "libp2p-identity", - "rcgen", + "rcgen 0.10.0", "ring", - "rustls 0.21.1", + "rustls 0.20.8", "thiserror", - "webpki", - "x509-parser", + "webpki 0.22.0", + "x509-parser 0.14.0", "yasna", ] +[[package]] +name = "libp2p-webrtc" +version = "0.4.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dba48592edbc2f60b4bc7c10d65445b0c3964c07df26fdf493b6880d33be36f8" +dependencies = [ + "async-trait", + "asynchronous-codec", + "bytes", + "futures", + "futures-timer", + "hex", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "log", + "multihash", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "rcgen 0.9.3", + "serde", + "stun", + "thiserror", + "tinytemplate", + "tokio", + "tokio-util", + "webrtc", +] + [[package]] name = "libp2p-yamux" -version = "0.44.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.43.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd21d950662700a385d4c6d68e2f5f54d778e97068cdd718522222ef513bda" dependencies = [ "futures", "libp2p-core", @@ -2520,10 +2920,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.18" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ + "cfg-if", "value-bag", ] @@ -2578,6 +2979,15 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] 
+ [[package]] name = "memoffset" version = "0.8.0" @@ -2643,14 +3053,14 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.18.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" dependencies = [ "arrayref", "byteorder", "data-encoding", - "libp2p-identity", + "log", "multibase", "multihash", "percent-encoding", @@ -2673,19 +3083,38 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd59dcc2bbe70baabeac52cd22ae52c55eefe6c38ff11a9439f16a350a939f2" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ "core2", + "digest 0.10.6", + "multihash-derive", "serde", + "serde-big-array", + "sha2 0.10.6", "unsigned-varint", ] +[[package]] +name = "multihash-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" +dependencies = [ + "proc-macro-crate", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + [[package]] name = "multistream-select" -version = "0.13.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" dependencies = [ "bytes", "futures", @@ -2771,6 +3200,7 @@ dependencies = [ "bitflags", "cfg-if", "libc", + "memoffset 0.6.5", ] [[package]] @@ -2829,20 +3259,29 @@ dependencies = [ "libc", ] +[[package]] +name = "oid-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" +dependencies = [ + "asn1-rs 0.3.1", +] + [[package]] name = "oid-registry" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", ] [[package]] name = "once_cell" -version = "1.18.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -2862,6 +3301,17 @@ version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.6", +] + [[package]] name = "p2panda-rs" version = "0.7.0" @@ -2891,6 +3341,17 @@ dependencies = [ "yasmf-hash", ] +[[package]] +name = "p384" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.6", +] + [[package]] name = "packed_simd_2" version = "0.3.8" @@ -2970,6 +3431,15 @@ 
dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem-rfc7468" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.2.0" @@ -3006,7 +3476,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] [[package]] @@ -3022,22 +3492,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.0" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 1.0.109", ] [[package]] @@ -3052,6 +3522,16 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.27" @@ -3088,7 +3568,7 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", ] [[package]] @@ -3100,7 +3580,19 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash", + "universal-hash 0.4.1", +] + +[[package]] +name = "polyval" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash 0.5.0", ] [[package]] @@ -3143,31 +3635,20 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-warning" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.18", -] - [[package]] name = "proc-macro2" -version = "1.0.60" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.21.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78c2f43e8969d51935d2a7284878ae053ba30034cd563f673cde37ba5205685e" +checksum = "5d6fa99d535dd930d1249e6c79cb3c2915f9172a540fe2b02a4c8f9ca954721e" dependencies = [ "dtoa", "itoa", @@ -3203,8 +3684,9 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1693116345026436eb2f10b677806169c1a1260c1c60eaaffe3fb5a29ae23d8b" dependencies = [ "asynchronous-codec", "bytes", @@ -3215,26 +3697,27 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85af4ed6ee5a89f26a26086e9089a6643650544c025158449a3626ebf72884b3" +checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", "ring", "rustc-hash", - "rustls 0.21.1", + "rustls 0.20.8", "slab", "thiserror", "tinyvec", "tracing", + "webpki 0.22.0", ] [[package]] name = "quote" -version = "1.0.28" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -3332,6 +3815,19 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "rcgen" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" +dependencies = [ + "pem", + "ring", + "time", + "x509-parser 0.13.2", + "yasna", +] + [[package]] name = "rcgen" version = "0.10.0" @@ -3375,9 +3871,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.4" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", @@ -3386,9 +3882,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "reqwest" @@ -3396,7 +3892,7 @@ version = "0.11.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13293b639a097af28fc8a90f22add145a9c954e49d77da06263d58cf44d5fb91" dependencies = [ - "base64 0.21.2", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -3436,6 +3932,17 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint", + "hmac 0.12.1", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -3525,6 +4032,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "rtcp" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" +dependencies = [ + "bytes", + "thiserror", + "webrtc-util", +] + [[package]] name = "rtnetlink" version = "0.10.1" @@ -3541,6 +4059,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "rtp" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" +dependencies = [ + "async-trait", + "bytes", + "rand 0.8.5", + "serde", + "thiserror", + "webrtc-util", +] + [[package]] name = "rustc-hash" version = "1.1.0" @@ -3581,26 
+4113,27 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.8" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ + "base64 0.13.1", "log", "ring", - "sct", - "webpki", + "sct 0.6.1", + "webpki 0.21.4", ] [[package]] name = "rustls" -version = "0.21.1" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", - "rustls-webpki", - "sct", + "sct 0.7.0", + "webpki 0.22.0", ] [[package]] @@ -3609,29 +4142,20 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.2", + "base64 0.21.0", ] [[package]] -name = "rustls-webpki" -version = "0.100.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.12" +name = "rustversion" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "rw-stream-sink" -version = "0.4.0" -source = "git+https://github.com/libp2p/rust-libp2p#2910c985e8ab75b52d36701b9c7717ebf0fd8585" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", "pin-project", @@ -3650,6 +4174,16 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "sct" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "sct" version = "0.7.0" @@ -3660,6 +4194,32 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sdp" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" +dependencies = [ + "rand 0.8.5", + "substring", + "thiserror", + "url", +] + +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "semver" version = "1.0.17" @@ -3675,6 +4235,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4" +dependencies = [ + "serde", +] + [[package]] name = "serde-wasm-bindgen" version = "0.4.5" @@ -3713,7 +4282,7 @@ checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.18", + "syn 2.0.15", ] [[package]] @@ -3748,6 +4317,19 @@ dependencies = [ "serde", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + [[package]] name = "sha1" version = "0.10.5" @@ -3807,6 +4389,10 @@ name = "signature" version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.6", + "rand_core 0.6.4", +] [[package]] name = "slab" @@ -3867,7 +4453,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" dependencies = [ - "aes-gcm", + "aes-gcm 0.9.4", "blake2", "chacha20poly1305", "curve25519-dalek 4.0.0-rc.1", @@ -3888,16 +4474,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "socket2" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "spin" version = "0.5.2" @@ -3913,6 +4489,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "sqlformat" version = "0.2.1" @@ -3961,7 +4547,7 @@ dependencies = [ "hashlink", "hex", "hkdf", - "hmac", + "hmac 0.12.1", "indexmap", "itoa", "libc", @@ -4048,6 +4634,34 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "stun" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" +dependencies = [ + "base64 0.13.1", + "crc", + "lazy_static", + "md-5", + "rand 0.8.5", + "ring", + "subtle", + "thiserror", + "tokio", + "url", + "webrtc-util", +] + +[[package]] +name = "substring" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ee6433ecef213b2e72f587ef64a2f5943e7cd16fbd82dbe8bc07486c534c86" +dependencies = [ + "autocfg", +] + [[package]] name = "subtle" version = "2.4.1" @@ -4067,9 +4681,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.18" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", @@ -4154,7 +4768,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] [[package]] @@ -4184,6 +4798,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version 
= "1.6.0" @@ -4201,32 +4825,33 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.2" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" +checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ "autocfg", "bytes", "libc", + "memchr", "mio", "num_cpus", "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.42.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 1.0.109", ] [[package]] @@ -4237,7 +4862,7 @@ checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls 0.20.8", "tokio", - "webpki", + "webpki 0.22.0", ] [[package]] @@ -4355,7 +4980,7 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] [[package]] @@ -4391,7 +5016,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2 0.4.9", + "socket2", "thiserror", "tinyvec", "tokio", @@ -4444,6 +5069,25 @@ dependencies = [ "utf-8", ] +[[package]] +name = "turn" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" +dependencies = [ + "async-trait", + "base64 0.13.1", + "futures", + "log", + "md-5", + "rand 0.8.5", + "ring", + "stun", + "thiserror", + "tokio", + "webrtc-util", +] + [[package]] name = "typenum" version = "1.16.0" @@ -4517,6 +5161,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "universal-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsigned-varint" version = "0.7.1" @@ -4550,11 +5204,24 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "uuid" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +dependencies = [ + "getrandom 0.2.9", +] + [[package]] name = "value-bag" -version = "1.4.0" +version = "1.0.0-alpha.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4d330786735ea358f3bc09eea4caa098569c1c93f342d9aca0514915022fe7e" +checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" +dependencies = [ + "ctor", + "version_check", +] [[package]] name = "varu64" @@ -4589,6 +5256,15 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "waitgroup" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1f50000a783467e6c0200f9d10642f4bc424e39efc1b770203e88b488f79292" 
+dependencies = [ + "atomic-waker", +] + [[package]] name = "waker-fn" version = "1.1.0" @@ -4638,7 +5314,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", "wasm-bindgen-shared", ] @@ -4672,7 +5348,7 @@ checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4696,6 +5372,21 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasm-timer" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.11.2", + "pin-utils", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "web-sys" version = "0.3.62" @@ -4706,6 +5397,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "webpki" version = "0.22.0" @@ -4722,7 +5423,216 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "webpki", + "webpki 0.22.0", +] + +[[package]] +name = "webrtc" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" +dependencies = [ + "arc-swap", + "async-trait", + "bytes", + "hex", + "interceptor", + "lazy_static", + "log", + "rand 0.8.5", + "rcgen 0.9.3", + "regex", + "ring", + "rtcp", + "rtp", + "rustls 0.19.1", + "sdp", + "serde", + "serde_json", + "sha2 0.10.6", + "stun", + "thiserror", + "time", + "tokio", + "turn", + "url", + "waitgroup", + "webrtc-data", + "webrtc-dtls", + "webrtc-ice", + "webrtc-mdns", + "webrtc-media", + "webrtc-sctp", + "webrtc-srtp", + "webrtc-util", +] + +[[package]] +name = "webrtc-data" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" +dependencies = [ + "bytes", + "derive_builder", + "log", + "thiserror", + "tokio", + "webrtc-sctp", + "webrtc-util", +] + +[[package]] +name = "webrtc-dtls" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" +dependencies = [ + "aes 0.6.0", + "aes-gcm 0.10.1", + "async-trait", + "bincode", + "block-modes", + "byteorder", + "ccm", + "curve25519-dalek 3.2.0", + "der-parser 8.2.0", + "elliptic-curve", + "hkdf", + "hmac 0.12.1", + "log", + "oid-registry 0.6.1", + "p256", + "p384", + "rand 0.8.5", + "rand_core 0.6.4", + "rcgen 0.9.3", + "ring", + "rustls 0.19.1", + "sec1", + "serde", + "sha1", + "sha2 0.10.6", + "signature", + "subtle", + "thiserror", + "tokio", + "webpki 0.21.4", + "webrtc-util", + "x25519-dalek 2.0.0-pre.1", + "x509-parser 0.13.2", +] + +[[package]] +name = "webrtc-ice" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" +dependencies = [ + "arc-swap", + "async-trait", + "crc", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "stun", + "thiserror", + 
"tokio", + "turn", + "url", + "uuid", + "waitgroup", + "webrtc-mdns", + "webrtc-util", +] + +[[package]] +name = "webrtc-mdns" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" +dependencies = [ + "log", + "socket2", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-media" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" +dependencies = [ + "byteorder", + "bytes", + "rand 0.8.5", + "rtp", + "thiserror", +] + +[[package]] +name = "webrtc-sctp" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" +dependencies = [ + "arc-swap", + "async-trait", + "bytes", + "crc", + "log", + "rand 0.8.5", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-srtp" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" +dependencies = [ + "aead 0.4.3", + "aes 0.7.5", + "aes-gcm 0.9.4", + "async-trait", + "byteorder", + "bytes", + "ctr 0.8.0", + "hmac 0.11.0", + "log", + "rtcp", + "rtp", + "sha-1", + "subtle", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-util" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" +dependencies = [ + "async-trait", + "bitflags", + "bytes", + "cc", + "ipnet", + "lazy_static", + "libc", + "log", + "nix", + "rand 0.8.5", + "thiserror", + "tokio", + "winapi", ] [[package]] @@ -4785,6 +5695,21 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -4967,18 +5892,49 @@ dependencies = [ "zeroize", ] +[[package]] +name = "x25519-dalek" +version = "2.0.0-pre.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +dependencies = [ + "curve25519-dalek 3.2.0", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "x509-parser" -version = "0.15.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab0c2f54ae1d92f4fcb99c0b7ccf0b1e3451cbd395e5f115ccbdbcb18d4f634" +checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" dependencies = [ - "asn1-rs", + "asn1-rs 0.3.1", + "base64 0.13.1", + "data-encoding", + "der-parser 7.0.0", + "lazy_static", + "nom", + "oid-registry 0.4.0", + "ring", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "x509-parser" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" +dependencies = [ + "asn1-rs 0.5.2", + "base64 0.13.1", "data-encoding", - 
"der-parser", + "der-parser 8.2.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.6.1", "rusticata-macros", "thiserror", "time", @@ -5040,5 +5996,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.15", ] diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index 4638775a0..0745042cd 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -33,7 +33,7 @@ envy = "0.4.2" futures = "0.3.23" hex = "0.4.3" http = "0.2.9" -libp2p = { git = "https://github.com/libp2p/rust-libp2p", features = [ +libp2p = { version = "0.51.3", features = [ "autonat", "identify", "macros", @@ -46,10 +46,10 @@ libp2p = { git = "https://github.com/libp2p/rust-libp2p", features = [ "tokio", "yamux", ] } -libp2p-quic = { git = "https://github.com/libp2p/rust-libp2p", features = ["tokio"] } +libp2p-quic = { version = "0.7.0-alpha.3", features = ["tokio"] } lipmaa-link = "0.2.2" log = "0.4.17" -once_cell = "1.18.0" +once_cell = "1.17.0" openssl-probe = "0.1.5" p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "ae60bf754a60a1daf9c6862e9ae51ee7501b007f", features = [ "storage-provider", @@ -82,7 +82,7 @@ ctor = "0.1.23" env_logger = "0.9.0" http = "0.2.9" hyper = "0.14.19" -libp2p-swarm-test = { git = "https://github.com/libp2p/rust-libp2p" } +libp2p-swarm-test = "0.1.0" once_cell = "1.17.0" p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "ae60bf754a60a1daf9c6862e9ae51ee7501b007f", features = [ "test-utils", diff --git a/aquadoggo/src/network/identity.rs b/aquadoggo/src/network/identity.rs index 04ae2043b..a5594deef 100644 --- a/aquadoggo/src/network/identity.rs +++ b/aquadoggo/src/network/identity.rs @@ -47,8 +47,12 @@ impl Identity for Keypair { /// Encode the private key as a hex string and save it to the given file path. // See: https://github.com/p2panda/aquadoggo/issues/295 + #[allow(deprecated)] fn save(&self, path: &Path) -> Result<()> { - let encoded_private_key = hex::encode(self.key_pair().try_into_ed25519()?.secret()); + let private_key = match self { + Keypair::Ed25519(key_pair) => key_pair.secret(), + }; + let encoded_private_key = hex::encode(private_key); fs::create_dir_all(path.parent().unwrap())?; let mut file = File::create(path)?; @@ -64,6 +68,7 @@ impl Identity for Keypair { /// Load a key pair from file at the given path. 
// See: https://github.com/p2panda/aquadoggo/issues/295 + #[allow(deprecated)] fn load(path: &Path) -> Result where Self: Sized, @@ -73,7 +78,8 @@ impl Identity for Keypair { file.read_to_string(&mut contents)?; let private_key_bytes = hex::decode(contents)?; - let key_pair = Keypair::ed25519_from_bytes(private_key_bytes)?; + let private_key = ed25519::SecretKey::from_bytes(private_key_bytes)?; + let key_pair = Keypair::Ed25519(private_key.into()); Ok(key_pair) } diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index f18658cf4..ab1807f8d 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::collections::{HashMap, VecDeque}; +use std::collections::VecDeque; use std::task::{Context, Poll}; use libp2p::core::Endpoint; @@ -10,11 +10,10 @@ use libp2p::swarm::{ PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; -use log::{debug, trace, warn}; +use log::trace; use p2panda_rs::Human; -use crate::network::replication::handler::{Handler, HandlerFromBehaviour, HandlerToBehaviour}; -use crate::replication::errors::ConnectionError; +use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; use crate::replication::SyncMessage; #[derive(Debug)] @@ -31,36 +30,16 @@ pub enum Event { #[derive(Debug)] pub struct Behaviour { - events: VecDeque>, - inbound_connections: HashMap, - outbound_connections: HashMap, + events: VecDeque>, } impl Behaviour { pub fn new() -> Self { Self { events: VecDeque::new(), - inbound_connections: HashMap::new(), - outbound_connections: HashMap::new(), } } - fn set_inbound_connection(&mut self, peer_id: PeerId, connection_id: ConnectionId) -> bool { - if self.inbound_connections.get(&peer_id).is_some() { - return false; - } - self.inbound_connections.insert(peer_id, connection_id); - true - } - - fn set_outbound_connection(&mut self, peer_id: PeerId, connection_id: ConnectionId) -> bool { - if self.outbound_connections.get(&peer_id).is_some() { - return false; - } - self.outbound_connections.insert(peer_id, connection_id); - true - } - fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { trace!( "Notify swarm of received sync message: {peer_id} {}", @@ -79,7 +58,7 @@ impl Behaviour { ); self.events.push_back(ToSwarm::NotifyHandler { peer_id, - event: HandlerFromBehaviour::Message(message), + event: HandlerInEvent::Message(message), handler: NotifyHandler::Any, }); } @@ -88,7 +67,7 @@ impl Behaviour { pub fn handle_error(&mut self, peer_id: PeerId) { self.events.push_back(ToSwarm::NotifyHandler { peer_id, - event: HandlerFromBehaviour::ReplicationError, + event: HandlerInEvent::ReplicationError, handler: NotifyHandler::Any, }); } @@ -97,76 +76,38 @@ impl Behaviour { impl NetworkBehaviour for Behaviour { type ConnectionHandler = Handler; - type ToSwarm = Event; + type OutEvent = Event; fn handle_established_inbound_connection( &mut self, - connection_id: ConnectionId, - peer_id: PeerId, - _local_addr: &Multiaddr, - remote_address: &Multiaddr, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: &Multiaddr, ) -> Result, ConnectionDenied> { - // We only want max one inbound connection per peer, so reject this connection if we - // already have one assigned. 
- if self.inbound_connections.get(&peer_id).is_some() { - debug!("Connection denied: inbound connection already exists for: {peer_id}"); - return Err(ConnectionDenied::new( - ConnectionError::MultipleInboundConnections(peer_id.to_owned()), - )); - } - debug!( - "New connection: established inbound connection with peer: {peer_id} {remote_address}" - ); - self.set_inbound_connection(peer_id, connection_id); Ok(Handler::new()) } fn handle_established_outbound_connection( &mut self, - connection_id: ConnectionId, - peer_id: PeerId, + _: ConnectionId, + _: PeerId, _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - // We only want max one outbound connection per peer, so reject this connection if we - // already have one assigned. - if self.outbound_connections.get(&peer_id).is_some() { - debug!("Connection denied: outbound connection already exists for: {peer_id}"); - return Err(ConnectionDenied::new( - ConnectionError::MultipleOutboundConnections(peer_id), - )); - } - debug!("New connection: established outbound connection with peer: {peer_id}"); - self.set_outbound_connection(peer_id, connection_id); Ok(Handler::new()) } fn on_connection_handler_event( &mut self, - peer_id: PeerId, - connection_id: ConnectionId, + peer: PeerId, + _connection_id: ConnectionId, handler_event: THandlerOutEvent, ) { - // We only want to process messages which arrive for connections we have assigned to this peer. - let mut current_inbound = false; - let mut current_outbound = false; - - if let Some(inbound_connection_id) = self.inbound_connections.get(&peer_id) { - current_inbound = *inbound_connection_id == connection_id; - } - - if let Some(outbound_connection_id) = self.outbound_connections.get(&peer_id) { - current_outbound = *outbound_connection_id == connection_id; - } - - if current_inbound || current_outbound { - match handler_event { - HandlerToBehaviour::Message(message) => { - self.handle_received_message(&peer_id, message); - } + match handler_event { + HandlerOutEvent::Message(message) => { + self.handle_received_message(&peer, message); } - } else { - debug!("Message ignored: message arrived on an unknown connection for: {peer_id}"); } } @@ -174,82 +115,22 @@ impl NetworkBehaviour for Behaviour { match event { FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, - connection_id, + remaining_established, .. 
}) => { - let inbound = self.inbound_connections.get(&peer_id); - let outbound = self.outbound_connections.get(&peer_id); - - match (inbound, outbound) { - // An inbound and outbound connection exists for this peer - (Some(inbound_connection_id), Some(outbound_connection_id)) => { - if *outbound_connection_id == connection_id { - debug!( - "Remove connections: remove outbound connection with peer: {peer_id}" - ); - self.outbound_connections.remove(&peer_id); - } - - if *inbound_connection_id == connection_id { - debug!( - "Remove connections: remove inbound connection with peer: {peer_id}" - ); - self.inbound_connections.remove(&peer_id); - } - } - // Only an outbound connection exists - (None, Some(outbound_connection_id)) => { - debug!( - "Remove connections: remove outbound connection with peer: {peer_id}" - ); - if *outbound_connection_id == connection_id { - self.outbound_connections.remove(&peer_id); - self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected( - peer_id, - ))); - } - } - // Only an inbound connection exists, - (Some(inbound_connection_id), None) => { - debug!( - "Remove connections: remove inbound connection with peer: {peer_id}" - ); - if *inbound_connection_id == connection_id { - self.inbound_connections.remove(&peer_id); - self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected( - peer_id, - ))); - } - } - (None, None) => { - warn!("Attempted to disconnect a peer with no known connections"); - } + if remaining_established == 0 { + self.events + .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer_id))); } } FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, - connection_id, + other_established, .. }) => { - // We only want to issue PeerConnected messages for connections we have accepted. 
- let mut current_inbound = false; - let mut current_outbound = false; - - if let Some(inbound_connection_id) = self.inbound_connections.get(&peer_id) { - current_inbound = *inbound_connection_id == connection_id; - } - - if let Some(outbound_connection_id) = self.outbound_connections.get(&peer_id) { - current_outbound = *outbound_connection_id == connection_id; - } - - if current_inbound || current_outbound { + if other_established == 0 { self.events .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer_id))); - } else { - warn!("Unknown connection: ignoring unknown connection with: {peer_id}"); } } FromSwarm::AddressChange(_) @@ -260,9 +141,8 @@ impl NetworkBehaviour for Behaviour { | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrConfirmed(_) - | FromSwarm::ExternalAddrExpired(_) => {} + | FromSwarm::NewExternalAddr(_) + | FromSwarm::ExpiredExternalAddr(_) => {} } } @@ -270,7 +150,7 @@ impl NetworkBehaviour for Behaviour { &mut self, _cx: &mut Context<'_>, _params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); } diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index 7947ad45e..70ba09d55 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -7,14 +7,11 @@ use std::time::{Duration, Instant}; use asynchronous_codec::Framed; use futures::{Sink, StreamExt}; -use libp2p::swarm::handler::{ - ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound, ProtocolsChange, -}; +use libp2p::swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; use libp2p::swarm::{ - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream as NegotiatedStream, - SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, KeepAlive, NegotiatedSubstream, SubstreamProtocol, }; -use log::{debug, warn}; +use log::warn; use thiserror::Error; use crate::network::replication::{Codec, CodecError, Protocol}; @@ -87,7 +84,7 @@ impl Handler { /// An event sent from the network behaviour to the connection handler. #[derive(Debug)] -pub enum HandlerFromBehaviour { +pub enum HandlerInEvent { /// Replication message to send on outbound stream. Message(SyncMessage), @@ -99,7 +96,7 @@ pub enum HandlerFromBehaviour { /// /// This informs the network behaviour of various events created by the handler. #[derive(Debug)] -pub enum HandlerToBehaviour { +pub enum HandlerOutEvent { /// Replication message received on the inbound stream. Message(SyncMessage), } @@ -116,7 +113,7 @@ pub enum HandlerError { RemotePeerDisconnected, } -type Stream = Framed; +type Stream = Framed; /// State of the inbound substream, opened either by us or by the remote. 
enum InboundSubstreamState { @@ -146,8 +143,8 @@ enum OutboundSubstreamState { } impl ConnectionHandler for Handler { - type FromBehaviour = HandlerFromBehaviour; - type ToBehaviour = HandlerToBehaviour; + type InEvent = HandlerInEvent; + type OutEvent = HandlerOutEvent; type Error = HandlerError; type InboundProtocol = Protocol; type OutboundProtocol = Protocol; @@ -179,21 +176,15 @@ impl ConnectionHandler for Handler { | ConnectionEvent::ListenUpgradeError(_) => { warn!("Connection event error"); } - ConnectionEvent::LocalProtocolsChange(_) => { - debug!("ConnectionEvent: LocalProtocolsChange") - } - ConnectionEvent::RemoteProtocolsChange(_) => { - debug!("ConnectionEvent: RemoteProtocolsChange") - } } } - fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + fn on_behaviour_event(&mut self, event: Self::InEvent) { match event { - HandlerFromBehaviour::Message(message) => { + HandlerInEvent::Message(message) => { self.send_queue.push_back(message); } - HandlerFromBehaviour::ReplicationError => { + HandlerInEvent::ReplicationError => { self.critical_error = true; } } @@ -217,7 +208,7 @@ impl ConnectionHandler for Handler { ConnectionHandlerEvent< Self::OutboundProtocol, Self::OutboundOpenInfo, - Self::ToBehaviour, + Self::OutEvent, Self::Error, >, > { @@ -255,8 +246,8 @@ impl ConnectionHandler for Handler { self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerToBehaviour::Message(message), + return Poll::Ready(ConnectionHandlerEvent::Custom( + HandlerOutEvent::Message(message), )); } Poll::Ready(Some(Err(err))) => { diff --git a/aquadoggo/src/network/replication/protocol.rs b/aquadoggo/src/network/replication/protocol.rs index ba06e0e55..a73edca2b 100644 --- a/aquadoggo/src/network/replication/protocol.rs +++ b/aquadoggo/src/network/replication/protocol.rs @@ -9,7 +9,7 @@ use libp2p::{InboundUpgrade, OutboundUpgrade}; use crate::replication::SyncMessage; -pub const PROTOCOL_NAME: &str = "/p2p/p2panda/1.0.0"; +pub const PROTOCOL_NAME: &[u8] = b"/p2p/p2panda/1.0.0"; pub type CodecError = CborCodecError; @@ -25,11 +25,11 @@ impl Protocol { } impl UpgradeInfo for Protocol { - type Info = String; + type Info = Vec; type InfoIter = Vec; fn protocol_info(&self) -> Self::InfoIter { - vec![PROTOCOL_NAME.to_string()] + vec![PROTOCOL_NAME.to_vec()] } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 9d5ae8ef1..904098708 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -3,7 +3,7 @@ use anyhow::Result; use libp2p::multiaddr::Protocol; use libp2p::ping::Event; -use libp2p::swarm::SwarmEvent; +use libp2p::swarm::{AddressScore, SwarmEvent}; use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr, Swarm}; use log::{debug, info, trace, warn}; use tokio::task; @@ -18,7 +18,6 @@ use crate::network::config::NODE_NAMESPACE; use crate::network::replication; use crate::network::swarm; use crate::network::NetworkConfiguration; -use crate::replication::errors::ConnectionError; /// Network service that configures and deploys a network swarm over QUIC transports. 
/// @@ -182,6 +181,7 @@ impl EventLoop { // ~~~~~ // Swarm // ~~~~~ + SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), SwarmEvent::ConnectionEstablished { peer_id, num_established, @@ -220,38 +220,14 @@ impl EventLoop { address, } => trace!("ExpiredListenAddr: {listener_id:?} {address}"), SwarmEvent::IncomingConnection { - connection_id, local_addr, send_back_addr, } => debug!("IncomingConnection: {local_addr} {send_back_addr}"), SwarmEvent::IncomingConnectionError { - connection_id, local_addr, send_back_addr, error, - } => { - warn!("IncomingConnectionError: {local_addr} {send_back_addr} {error:?}"); - debug!("{:#?}", self.swarm.network_info()); - // We can access the PeerId here and call disconnect_peer_id on swarm to clean up - // any dangling connections, but there shouldn't actually be any.... and when i - // try this it doesn't have an effect (we still hit pending/established connection - // limits) - // - // match error { - // libp2p::swarm::ListenError::Denied { cause } => { - // let error = cause.downcast::(); - // if let Ok(error) = error { - // match error { - // ConnectionError::MultipleInboundConnections(peer_id) => { - // let _ = self.swarm.disconnect_peer_id(peer_id); - // } - // _ => (), - // } - // } - // } - // _ => (), - // } - } + } => warn!("IncomingConnectionError: {local_addr} {send_back_addr} {error:?}"), SwarmEvent::ListenerClosed { listener_id, addresses, @@ -266,11 +242,7 @@ impl EventLoop { } => { info!("Listening on {address}"); } - SwarmEvent::OutgoingConnectionError { - connection_id, - peer_id, - error, - } => { + SwarmEvent::OutgoingConnectionError { peer_id, error } => { warn!("OutgoingConnectionError: {peer_id:?} {error:?}"); // self.send_service_message(ServiceMessage::ConnectionError(peer_id)); } @@ -283,11 +255,11 @@ impl EventLoop { for (peer_id, multiaddr) in list { debug!("mDNS discovered a new peer: {peer_id}"); - if let Err(err) = self.swarm.dial(multiaddr) { + if let Err(err) = self.swarm.dial(peer_id) { warn!("Failed to dial: {}", err); } else { debug!("Dial success: skip remaining addresses for: {peer_id}"); - break; + break } // // Only dial the newly discovered peer if we're not already connected. 
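The hunk above switches from dialing a discovered `Multiaddr` to dialing by `PeerId`, leaving address selection to the swarm. The commented-out block it touches asks how to avoid dialing a peer we are already connected to after simultaneous discovery; libp2p's `DialOpts` builder can express exactly that. A minimal sketch, assuming the libp2p 0.51.3 API this patch series pins (not part of the patch itself):

use libp2p::swarm::dial_opts::{DialOpts, PeerCondition};
use libp2p::swarm::{NetworkBehaviour, Swarm};
use libp2p::PeerId;
use log::warn;

// Dial only when no connection to this peer exists yet, covering the case
// where two peers discover each other at the same moment.
fn dial_if_disconnected<B: NetworkBehaviour>(swarm: &mut Swarm<B>, peer_id: PeerId) {
    let opts = DialOpts::peer_id(peer_id)
        .condition(PeerCondition::Disconnected)
        .build();

    if let Err(err) = swarm.dial(opts) {
        warn!("Failed to dial: {}", err);
    }
}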
@@ -313,11 +285,7 @@ impl EventLoop { // ~~~~ // Ping // ~~~~ - SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { - connection, - peer, - result: _, - })) => { + SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { peer, result: _ })) => { debug!("Ping from: {peer}") } @@ -344,7 +312,7 @@ impl EventLoop { if peer_id != local_peer_id { debug!("Discovered peer {peer_id} at {address}"); - let p2p_suffix = Protocol::P2p(peer_id); + let p2p_suffix = Protocol::P2p(*peer_id.as_ref()); let address_with_p2p = if !address .ends_with(&Multiaddr::empty().with(p2p_suffix.clone())) { @@ -442,7 +410,8 @@ impl EventLoop { if let Some(addr) = self.external_circuit_addr.clone() { trace!("Adding external relayed listen address: {}", addr); - self.swarm.add_external_address(addr); + self.swarm + .add_external_address(addr, AddressScore::Finite(1)); if let Some(rendezvous_peer_id) = self.network_config.rendezvous_peer_id { diff --git a/aquadoggo/src/network/transport.rs b/aquadoggo/src/network/transport.rs index 2087e4665..582d877eb 100644 --- a/aquadoggo/src/network/transport.rs +++ b/aquadoggo/src/network/transport.rs @@ -5,8 +5,10 @@ use libp2p::core::muxing::StreamMuxerBox; use libp2p::core::transport::upgrade::Version; use libp2p::core::transport::{Boxed, OrTransport}; use libp2p::identity::Keypair; -use libp2p::noise::Config as NoiseConfig; -use libp2p::yamux::Config as YamuxConfig; +#[allow(deprecated)] +use libp2p::noise::NoiseAuthenticated; +#[allow(deprecated)] +use libp2p::yamux::YamuxConfig; use libp2p::{relay, PeerId, Transport}; use libp2p_quic as quic; @@ -32,7 +34,7 @@ pub async fn build_transport( // Add encryption and multiplexing to the relay transport let relay_transport = relay_transport .upgrade(Version::V1) - .authenticate(NoiseConfig::new(key_pair).unwrap()) + .authenticate(NoiseAuthenticated::xx(key_pair).unwrap()) .multiplex(YamuxConfig::default()); // The relay transport only handles listening and dialing on a relayed Multiaddr; it depends diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index 8d4c71468..c55ae4e1c 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -1,19 +1,9 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use libp2p::PeerId; use thiserror::Error; use crate::replication::TargetSet; -#[derive(Error, Debug)] -pub enum ConnectionError { - #[error("Reject duplicate inbound connection with peer: {0}")] - MultipleInboundConnections(PeerId), - - #[error("Reject duplicate outbound connection with peer: {0}")] - MultipleOutboundConnections(PeerId), -} - #[derive(Error, Debug)] pub enum ReplicationError { #[error("Remote peer requested unsupported replication mode")] diff --git a/aquadoggo_cli/Cargo.toml b/aquadoggo_cli/Cargo.toml index b9a3f3765..585caafbc 100644 --- a/aquadoggo_cli/Cargo.toml +++ b/aquadoggo_cli/Cargo.toml @@ -20,11 +20,11 @@ path = "src/main.rs" doc = false [dependencies] -anyhow = "1.0.62" -tokio = { version = "1.25.0", features = ["full"] } -env_logger = "0.9.0" -clap = { version = "4.1.8", features = ["derive"] } -libp2p = { git = "https://github.com/libp2p/rust-libp2p" } +anyhow = "=1.0.62" +tokio = { version = "=1.25.0", features = ["full"] } +env_logger = "=0.9.0" +clap = { version = "=4.1.8", features = ["derive"] } +libp2p = "=0.51.3" [dependencies.aquadoggo] version = "~0.4.0" diff --git a/aquadoggo_cli/src/main.rs b/aquadoggo_cli/src/main.rs index 52197d749..49780fde4 100644 --- a/aquadoggo_cli/src/main.rs +++ b/aquadoggo_cli/src/main.rs @@ -2,7 +2,6 @@ 
#![allow(clippy::uninlined_format_args)] use std::convert::{TryFrom, TryInto}; -use std::str::FromStr; use anyhow::Result; use aquadoggo::{Configuration, NetworkConfiguration, Node}; @@ -66,34 +65,33 @@ struct Cli { impl Cli { // Run custom validators on parsed CLI input fn validate(self) -> Self { - // @TODO: This needs updating for v0.5.2 https://github.com/libp2p/rust-libp2p/pull/3656 - // // Ensure rendezvous server address includes a peer ID - // if let Some(addr) = &self.rendezvous_address { - // // Check if the given `Multiaddr` contains a `PeerId` - // if PeerId::try_from_multiaddr(addr).is_none() { - // // Print a help message about the missing value(s) and exit - // Cli::command() - // .error( - // ClapErrorKind::ValueValidation, - // "'--rendezvous-address' must include the peer ID of the server", - // ) - // .exit() - // } - // } - // - // // Ensure relay server address includes a peer ID - // if let Some(addr) = &self.relay_address { - // // Check if the given `Multiaddr` contains a `PeerId` - // if PeerId::try_from_multiaddr(addr).is_none() { - // // Print a help message about the missing value(s) and exit - // Cli::command() - // .error( - // ClapErrorKind::ValueValidation, - // "'--relay-address' must include the peer ID of the server", - // ) - // .exit() - // } - // } + // Ensure rendezvous server address includes a peer ID + if let Some(addr) = &self.rendezvous_address { + // Check if the given `Multiaddr` contains a `PeerId` + if PeerId::try_from_multiaddr(addr).is_none() { + // Print a help message about the missing value(s) and exit + Cli::command() + .error( + ClapErrorKind::ValueValidation, + "'--rendezvous-address' must include the peer ID of the server", + ) + .exit() + } + } + + // Ensure relay server address includes a peer ID + if let Some(addr) = &self.relay_address { + // Check if the given `Multiaddr` contains a `PeerId` + if PeerId::try_from_multiaddr(addr).is_none() { + // Print a help message about the missing value(s) and exit + Cli::command() + .error( + ClapErrorKind::ValueValidation, + "'--relay-address' must include the peer ID of the server", + ) + .exit() + } + } self } @@ -106,16 +104,13 @@ impl TryFrom for Configuration { let mut config = Configuration::new(cli.data_dir)?; let relay_peer_id = if let Some(addr) = &cli.relay_address { - // @TODO: This needs updating for v0.5.2 https://github.com/libp2p/rust-libp2p/pull/3656 - - Some(PeerId::from_str(&addr.into_iter().last().unwrap().to_string()).unwrap()) + PeerId::try_from_multiaddr(addr) } else { None }; let rendezvous_peer_id = if let Some(addr) = &cli.rendezvous_address { - // @TODO: This needs updating for v0.5.2 https://github.com/libp2p/rust-libp2p/pull/3656 - Some(PeerId::from_str(&addr.into_iter().last().unwrap().to_string()).unwrap()) + PeerId::try_from_multiaddr(addr) } else { None }; From 2965230485e367646ae1837a5ccade6038ce16f6 Mon Sep 17 00:00:00 2001 From: adz Date: Mon, 12 Jun 2023 16:07:33 +0200 Subject: [PATCH 103/126] Make clippy happy --- aquadoggo/src/network/service.rs | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 904098708..f035291ef 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -252,27 +252,15 @@ impl EventLoop { // ~~~~ SwarmEvent::Behaviour(BehaviourEvent::Mdns(event)) => match event { mdns::Event::Discovered(list) => { - for (peer_id, multiaddr) in list { + for (peer_id, _multiaddr) in list { debug!("mDNS discovered a 
new peer: {peer_id}"); if let Err(err) = self.swarm.dial(peer_id) { warn!("Failed to dial: {}", err); } else { debug!("Dial success: skip remaining addresses for: {peer_id}"); - break + break; } - - // // Only dial the newly discovered peer if we're not already connected. - // // - // // @TODO: Is this even a thing? Trying to catch the case where two peers - // // simultaneously discover and connect to each other. - // if !self.swarm.is_connected(&peer_id) { - // if let Err(err) = self.swarm.dial(multiaddr) { - // warn!("Failed to dial: {}", err); - // } - // } else { - // warn!("Not dialing discovered peer as connection already exists: {peer_id:?}") - // } } } mdns::Event::Expired(list) => { From 7d95942b2198b8fb027be05b9ebe7c857e63372d Mon Sep 17 00:00:00 2001 From: adz Date: Mon, 12 Jun 2023 16:07:42 +0200 Subject: [PATCH 104/126] Do never actively close connections --- aquadoggo/src/network/replication/handler.rs | 52 ++++++-------------- 1 file changed, 16 insertions(+), 36 deletions(-) diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index 70ba09d55..4bf3bf3ec 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -41,10 +41,10 @@ pub struct Handler { /// Last time we've observed inbound or outbound messaging activity. last_io_activity: Instant, - /// Flag indicating that we want to close _all_ connection handlers related to that peer. + /// Flag indicating that we want to close connection handlers related to that peer. /// /// This is useful in scenarios where a critical error occurred outside of the libp2p stack - /// (for example in the replication service) and we need to accordingly close all connections. + /// (for example in the replication service) and we need to accordingly close connections. critical_error: bool, } @@ -105,12 +105,6 @@ pub enum HandlerOutEvent { pub enum HandlerError { #[error("Failed to encode or decode CBOR")] Codec(#[from] CodecError), - - #[error("Critical replication protocol error")] - ReplicationError, - - #[error("Remote peer closed connection")] - RemotePeerDisconnected, } type Stream = Framed; @@ -191,6 +185,10 @@ impl ConnectionHandler for Handler { } fn connection_keep_alive(&self) -> KeepAlive { + if self.critical_error { + return KeepAlive::No; + } + if let Some( OutboundSubstreamState::PendingSend(_, _) | OutboundSubstreamState::PendingFlush(_), ) = self.outbound_substream @@ -212,14 +210,6 @@ impl ConnectionHandler for Handler { Self::Error, >, > { - if self.critical_error { - // Returning a `Close` event will inform all other handlers to close their connections - // to that peer - return Poll::Ready(ConnectionHandlerEvent::Close( - HandlerError::ReplicationError, - )); - } - // Determine if we need to create the outbound stream if !self.send_queue.is_empty() && self.outbound_substream.is_none() @@ -274,17 +264,13 @@ impl ConnectionHandler for Handler { match Sink::poll_close(Pin::new(&mut substream), cx) { Poll::Ready(res) => { if let Err(err) = res { + // Don't close the connection but just drop the inbound substream. + // In case the remote has more to send, they will open up a new + // substream. 
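+                                // Dropping the substream instead of returning a `Close` event
+                                // leaves the connection's lifetime to `connection_keep_alive`,
+                                // which only reports `KeepAlive::No` after a critical error.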
warn!("Error during closing inbound connection: {err}") } - self.inbound_substream = None; - - // Close all connection handlers because we can assume that the remote - // peer actively closed an existing connection and probably went - // offline - return Poll::Ready(ConnectionHandlerEvent::Close( - HandlerError::RemotePeerDisconnected, - )); + break; } Poll::Pending => { self.inbound_substream = @@ -335,19 +321,15 @@ impl ConnectionHandler for Handler { } Err(err) => { warn!("Error sending outbound message: {err}"); - - return Poll::Ready(ConnectionHandlerEvent::Close( - HandlerError::Codec(err), - )); + self.outbound_substream = None; + break; } } } Poll::Ready(Err(err)) => { warn!("Error encoding outbound message: {err}"); - - return Poll::Ready(ConnectionHandlerEvent::Close( - HandlerError::Codec(err), - )); + self.outbound_substream = None; + break; } Poll::Pending => { self.outbound_substream = @@ -365,10 +347,8 @@ impl ConnectionHandler for Handler { } Poll::Ready(Err(err)) => { warn!("Error flushing outbound message: {err}"); - - return Poll::Ready(ConnectionHandlerEvent::Close( - HandlerError::Codec(err), - )); + self.outbound_substream = None; + break; } Poll::Pending => { self.outbound_substream = From c47cfee3f7c5bda120d9c049d92e0f8a1b19d2c5 Mon Sep 17 00:00:00 2001 From: adz Date: Mon, 12 Jun 2023 16:08:30 +0200 Subject: [PATCH 105/126] Remove dead code --- aquadoggo/src/network/service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index f035291ef..0b539729d 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -244,7 +244,6 @@ impl EventLoop { } SwarmEvent::OutgoingConnectionError { peer_id, error } => { warn!("OutgoingConnectionError: {peer_id:?} {error:?}"); - // self.send_service_message(ServiceMessage::ConnectionError(peer_id)); } // ~~~~ From 4b2885cde4e532a40c709ff35f834061de84cc64 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:09:05 +0200 Subject: [PATCH 106/126] Check more often when using ping and mDNS discovery --- aquadoggo/src/network/behaviour.rs | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/aquadoggo/src/network/behaviour.rs b/aquadoggo/src/network/behaviour.rs index fb9a9f20c..d929b1a26 100644 --- a/aquadoggo/src/network/behaviour.rs +++ b/aquadoggo/src/network/behaviour.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use std::time::Duration; + use anyhow::Result; use libp2p::identity::Keypair; use libp2p::swarm::behaviour::toggle::Toggle; @@ -11,6 +13,16 @@ use crate::network::config::NODE_NAMESPACE; use crate::network::replication; use crate::network::NetworkConfiguration; +/// How often do we broadcast mDNS queries into the network. +const MDNS_QUERY_INTERVAL: Duration = Duration::from_secs(5); + +/// How often do we ping other peers to check for a healthy connection. +const PING_INTERVAL: Duration = Duration::from_secs(5); + +/// How long do we wait for an answer from the other peer before we consider the connection as +/// stale. +const PING_TIMEOUT: Duration = Duration::from_secs(3); + /// Network behaviour for the aquadoggo node. #[derive(NetworkBehaviour)] pub struct Behaviour { @@ -89,7 +101,13 @@ impl Behaviour { // Create an mDNS behaviour with default configuration if the mDNS flag is set let mdns = if network_config.mdns { debug!("mDNS network behaviour enabled"); - Some(mdns::Behaviour::new(Default::default(), peer_id)?) 
+ Some(mdns::Behaviour::new( + mdns::Config { + query_interval: MDNS_QUERY_INTERVAL, + ..mdns::Config::default() + }, + peer_id, + )?) } else { None }; @@ -97,7 +115,11 @@ impl Behaviour { // Create a ping behaviour with default configuration if the ping flag is set let ping = if network_config.ping { debug!("Ping network behaviour enabled"); - Some(ping::Behaviour::default()) + Some(ping::Behaviour::new( + ping::Config::new() + .with_interval(PING_INTERVAL) + .with_timeout(PING_TIMEOUT), + )) } else { None }; @@ -106,7 +128,6 @@ impl Behaviour { // address has been provided let rendezvous_client = if network_config.rendezvous_address.is_some() { debug!("Rendezvous client network behaviour enabled"); - // @TODO: Why does this need the whole key pair?! Some(rendezvous::client::Behaviour::new(key_pair)) } else { None From fdf4f4a275ad0c1269419f2a017399d402f2715a Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:09:27 +0200 Subject: [PATCH 107/126] Close replication session on all errors --- aquadoggo/src/replication/service.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 344f8a7de..db8db12b2 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -209,12 +209,7 @@ impl ConnectionManager { } } - match error { - ReplicationError::StrategyFailed(_) | ReplicationError::Validation(_) => { - self.sync_manager.remove_session(&peer_id, &session_id); - } - _ => (), // Don't try and close the session on other errors as it should not have been initiated - } + self.sync_manager.remove_session(&peer_id, &session_id); // Inform network service about error, so it can accordingly react self.send_service_message(ServiceMessage::ReplicationFailed(peer_id)); From 4682e187fc46238032089c36e374b08fec72ce15 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:09:43 +0200 Subject: [PATCH 108/126] Better error logging --- aquadoggo/src/replication/errors.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aquadoggo/src/replication/errors.rs b/aquadoggo/src/replication/errors.rs index c55ae4e1c..4738ca3f2 100644 --- a/aquadoggo/src/replication/errors.rs +++ b/aquadoggo/src/replication/errors.rs @@ -24,7 +24,7 @@ pub enum ReplicationError { #[error("Replication strategy failed with error: {0}")] StrategyFailed(String), - #[error("Incoming data could not be ingested")] + #[error("Incoming data could not be ingested: {0}")] Validation(#[from] IngestError), } @@ -37,10 +37,10 @@ pub enum IngestError { #[error(transparent)] Domain(#[from] p2panda_rs::api::DomainError), - #[error("Decoding entry failed")] + #[error("Decoding entry failed: {0}")] DecodeEntry(#[from] p2panda_rs::entry::error::DecodeEntryError), - #[error("Decoding operation failed")] + #[error("Decoding operation failed: {0}")] DecodeOperation(#[from] p2panda_rs::operation::error::DecodeOperationError), } From 0c3dde9cecd73932d44398680ead1f256b0fc27c Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:10:10 +0200 Subject: [PATCH 109/126] Fix issue where outbound streams could not be re-established after error --- aquadoggo/src/network/replication/handler.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/replication/handler.rs index 4bf3bf3ec..17a3a6057 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/replication/handler.rs @@ -19,7 +19,7 
@@ use crate::replication::SyncMessage; /// The time a connection is maintained to a peer without being in live mode and without /// send/receiving a message from. Connections that idle beyond this timeout are disconnected. -const IDLE_TIMEOUT: Duration = Duration::from_secs(30); +const IDLE_TIMEOUT: Duration = Duration::from_secs(60); pub struct Handler { /// Upgrade configuration for the replication protocol. @@ -69,6 +69,7 @@ impl Handler { >, ) { self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(protocol)); + self.outbound_substream_establishing = false; } fn on_fully_negotiated_inbound( From bfd9defa7aee57b82955a27b6132af35e5c20922 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:10:49 +0200 Subject: [PATCH 110/126] Add behaviour logic which always uses latest healthy connection --- .../src/network/replication/behaviour.rs | 86 +++++++++++++++---- 1 file changed, 70 insertions(+), 16 deletions(-) diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/replication/behaviour.rs index ab1807f8d..c7579ff07 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/replication/behaviour.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::collections::VecDeque; +use std::collections::{HashMap, VecDeque}; use std::task::{Context, Poll}; use libp2p::core::Endpoint; @@ -10,7 +10,7 @@ use libp2p::swarm::{ PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; -use log::trace; +use log::{info, trace}; use p2panda_rs::Human; use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; @@ -31,12 +31,57 @@ pub enum Event { #[derive(Debug)] pub struct Behaviour { events: VecDeque>, + peers: HashMap>, } impl Behaviour { pub fn new() -> Self { Self { events: VecDeque::new(), + peers: HashMap::new(), + } + } + + fn on_connection_established(&mut self, peer_id: PeerId, connection_id: ConnectionId) { + // Register latest connection at the end of our list for peer + let connections = match self.peers.get(&peer_id) { + Some(vec) => { + let mut vec = vec.clone(); + vec.push(connection_id); + vec + } + None => { + // Inform other services about new peer when we see it for the first time + info!("Connected to new peer {peer_id}"); + self.events + .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer_id))); + + vec![connection_id] + } + }; + + self.peers.insert(peer_id, connections); + } + + fn on_connection_closed(&mut self, peer_id: PeerId, connection_id: ConnectionId) { + if let Some(connections) = self.peers.get(&peer_id) { + let filtered: Vec = connections + .to_owned() + .into_iter() + .filter(|connection| connection != &connection_id) + .collect(); + + if filtered.is_empty() { + info!("Disconnected from peer {peer_id}"); + self.events + .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer_id))); + + self.peers.remove(&peer_id); + } else { + self.peers.insert(peer_id, filtered); + } + } else { + panic!("Tried to close connection of inexistent peer"); } } @@ -52,23 +97,38 @@ impl Behaviour { } pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { + let connection_id = self + .peers + .get(&peer_id) + .expect("Tried to handle error for unknown peer") + .last() + .expect("Tried to handle error for peer with inexistent connections"); + trace!( "Notify handler of sent sync message: {peer_id} {}", message.display() ); + self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: 
HandlerInEvent::Message(message), - handler: NotifyHandler::Any, + handler: NotifyHandler::One(connection_id.to_owned()), }); } /// React to errors coming from the replication protocol living inside the replication service. pub fn handle_error(&mut self, peer_id: PeerId) { + let connection_id = self + .peers + .get(&peer_id) + .expect("Tried to handle error for unknown peer") + .last() + .expect("Tried to handle error for peer with inexistent connections"); + self.events.push_back(ToSwarm::NotifyHandler { peer_id, event: HandlerInEvent::ReplicationError, - handler: NotifyHandler::Any, + handler: NotifyHandler::One(connection_id.to_owned()), }); } } @@ -113,25 +173,19 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, event: FromSwarm) { match event { - FromSwarm::ConnectionClosed(ConnectionClosed { + FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, - remaining_established, + connection_id, .. }) => { - if remaining_established == 0 { - self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer_id))); - } + self.on_connection_established(peer_id, connection_id); } - FromSwarm::ConnectionEstablished(ConnectionEstablished { + FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, - other_established, + connection_id, .. }) => { - if other_established == 0 { - self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer_id))); - } + self.on_connection_closed(peer_id, connection_id); } FromSwarm::AddressChange(_) | FromSwarm::DialFailure(_) From 891892f787a2ce46430294a3418a667325dfa0bd Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:17:38 +0200 Subject: [PATCH 111/126] Rename to peers behaviour --- aquadoggo/src/network/behaviour.rs | 10 ++++++---- aquadoggo/src/network/mod.rs | 2 +- .../{replication => peers}/behaviour.rs | 20 +++++++++---------- .../network/{replication => peers}/handler.rs | 14 ++++++------- .../src/network/{replication => peers}/mod.rs | 0 .../{replication => peers}/protocol.rs | 0 aquadoggo/src/network/service.rs | 20 +++++++++---------- 7 files changed, 34 insertions(+), 32 deletions(-) rename aquadoggo/src/network/{replication => peers}/behaviour.rs (94%) rename aquadoggo/src/network/{replication => peers}/handler.rs (97%) rename aquadoggo/src/network/{replication => peers}/mod.rs (100%) rename aquadoggo/src/network/{replication => peers}/protocol.rs (100%) diff --git a/aquadoggo/src/network/behaviour.rs b/aquadoggo/src/network/behaviour.rs index d929b1a26..5e061d186 100644 --- a/aquadoggo/src/network/behaviour.rs +++ b/aquadoggo/src/network/behaviour.rs @@ -10,7 +10,7 @@ use libp2p::{autonat, connection_limits, identify, mdns, ping, relay, rendezvous use log::debug; use crate::network::config::NODE_NAMESPACE; -use crate::network::replication; +use crate::network::peers; use crate::network::NetworkConfiguration; /// How often do we broadcast mDNS queries into the network. @@ -59,7 +59,8 @@ pub struct Behaviour { /// the addresses of other peers. pub rendezvous_server: Toggle, - pub replication: replication::Behaviour, + /// Register peer connections and handle p2panda messaging with them. 
+ pub peers: peers::Behaviour, } impl Behaviour { @@ -157,7 +158,8 @@ impl Behaviour { None }; - let replication = replication::Behaviour::new(); + // Create behaviour to manage peer connections and handle p2panda messaging + let peers = peers::Behaviour::new(); Ok(Self { autonat: autonat.into(), @@ -169,7 +171,7 @@ impl Behaviour { rendezvous_server: rendezvous_server.into(), relay_client: relay_client.into(), relay_server: relay_server.into(), - replication, + peers, }) } } diff --git a/aquadoggo/src/network/mod.rs b/aquadoggo/src/network/mod.rs index 7b34116b5..5c2b14fbb 100644 --- a/aquadoggo/src/network/mod.rs +++ b/aquadoggo/src/network/mod.rs @@ -3,7 +3,7 @@ mod behaviour; mod config; pub mod identity; -mod replication; +mod peers; mod service; mod swarm; mod transport; diff --git a/aquadoggo/src/network/replication/behaviour.rs b/aquadoggo/src/network/peers/behaviour.rs similarity index 94% rename from aquadoggo/src/network/replication/behaviour.rs rename to aquadoggo/src/network/peers/behaviour.rs index c7579ff07..6ae76ed00 100644 --- a/aquadoggo/src/network/replication/behaviour.rs +++ b/aquadoggo/src/network/peers/behaviour.rs @@ -13,12 +13,12 @@ use libp2p::{Multiaddr, PeerId}; use log::{info, trace}; use p2panda_rs::Human; -use crate::network::replication::handler::{Handler, HandlerInEvent, HandlerOutEvent}; +use crate::network::peers::handler::{Handler, HandlerInEvent, HandlerOutEvent}; use crate::replication::SyncMessage; #[derive(Debug)] pub enum Event { - /// Replication message received on the inbound stream. + /// Message received on the inbound stream. MessageReceived(PeerId, SyncMessage), /// We established an inbound or outbound connection to a peer for the first time. @@ -116,7 +116,7 @@ impl Behaviour { }); } - /// React to errors coming from the replication protocol living inside the replication service. + /// React to errors from other services (for example replication service). pub fn handle_error(&mut self, peer_id: PeerId) { let connection_id = self .peers @@ -127,7 +127,7 @@ impl Behaviour { self.events.push_back(ToSwarm::NotifyHandler { peer_id, - event: HandlerInEvent::ReplicationError, + event: HandlerInEvent::CriticalError, handler: NotifyHandler::One(connection_id.to_owned()), }); } @@ -224,13 +224,13 @@ mod tests { use crate::replication::{Message, SyncMessage, TargetSet}; use crate::test_utils::helpers::random_target_set; - use super::{Behaviour as ReplicationBehaviour, Event}; + use super::{Behaviour as PeersBehaviour, Event}; #[tokio::test] async fn peers_connect() { // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); - let mut swarm2 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + let mut swarm1 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); + let mut swarm2 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); // Listen on swarm1 and connect from swarm2, this should establish a bi-directional // connection. @@ -259,7 +259,7 @@ mod tests { #[tokio::test] async fn incompatible_network_behaviour() { // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + let mut swarm1 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); let mut swarm2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour); // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. 
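These tests exercise the connection bookkeeping added a few patches back: every established connection is appended to a per-peer list, closed ones are filtered out again, and the most recent entry is the one messages are routed over. A condensed sketch of that logic in plain Rust; the stand-in types are illustrative, not the crate's API:

use std::collections::HashMap;

// Stand-ins for libp2p's `PeerId` and `ConnectionId`, just for this sketch.
type PeerId = u64;
type ConnectionId = u64;

#[derive(Default)]
struct ConnectionBook {
    peers: HashMap<PeerId, Vec<ConnectionId>>,
}

impl ConnectionBook {
    // Returns true on the first connection for a peer, the moment the
    // behaviour announces `Event::PeerConnected`.
    fn on_established(&mut self, peer: PeerId, connection: ConnectionId) -> bool {
        let connections = self.peers.entry(peer).or_default();
        connections.push(connection);
        connections.len() == 1
    }

    // Returns true when the peer's last connection is gone, the moment the
    // behaviour announces `Event::PeerDisconnected`.
    fn on_closed(&mut self, peer: PeerId, connection: ConnectionId) -> bool {
        if let Some(connections) = self.peers.get_mut(&peer) {
            connections.retain(|id| *id != connection);
            if connections.is_empty() {
                self.peers.remove(&peer);
                return true;
            }
        }
        false
    }

    // The "latest healthy connection" is simply the most recently established
    // one that has not been closed yet.
    fn latest(&self, peer: PeerId) -> Option<ConnectionId> {
        self.peers.get(&peer).and_then(|ids| ids.last().copied())
    }
}

A peer is announced once per session and a disconnect only fires when its final connection closes, which matches the `PeerConnected` / `PeerDisconnected` contract the swarms above assert on.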
@@ -312,8 +312,8 @@ mod tests { #[case] target_set_2: TargetSet, ) { // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); - let mut swarm2 = Swarm::new_ephemeral(|_| ReplicationBehaviour::new()); + let mut swarm1 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); + let mut swarm2 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. swarm1.listen().await; diff --git a/aquadoggo/src/network/replication/handler.rs b/aquadoggo/src/network/peers/handler.rs similarity index 97% rename from aquadoggo/src/network/replication/handler.rs rename to aquadoggo/src/network/peers/handler.rs index 17a3a6057..82128904b 100644 --- a/aquadoggo/src/network/replication/handler.rs +++ b/aquadoggo/src/network/peers/handler.rs @@ -14,7 +14,7 @@ use libp2p::swarm::{ use log::warn; use thiserror::Error; -use crate::network::replication::{Codec, CodecError, Protocol}; +use crate::network::peers::{Codec, CodecError, Protocol}; use crate::replication::SyncMessage; /// The time a connection is maintained to a peer without being in live mode and without @@ -22,7 +22,7 @@ use crate::replication::SyncMessage; const IDLE_TIMEOUT: Duration = Duration::from_secs(60); pub struct Handler { - /// Upgrade configuration for the replication protocol. + /// Upgrade configuration for the protocol. listen_protocol: SubstreamProtocol, /// The single long-lived outbound substream. @@ -86,11 +86,11 @@ impl Handler { /// An event sent from the network behaviour to the connection handler. #[derive(Debug)] pub enum HandlerInEvent { - /// Replication message to send on outbound stream. + /// Message to send on outbound stream. Message(SyncMessage), - /// Replication protocol failed with an error. - ReplicationError, + /// Protocol failed with a critical error. + CriticalError, } /// The event emitted by the connection handler. @@ -98,7 +98,7 @@ pub enum HandlerInEvent { /// This informs the network behaviour of various events created by the handler. #[derive(Debug)] pub enum HandlerOutEvent { - /// Replication message received on the inbound stream. + /// Message received on the inbound stream. 
Message(SyncMessage), } @@ -179,7 +179,7 @@ impl ConnectionHandler for Handler { HandlerInEvent::Message(message) => { self.send_queue.push_back(message); } - HandlerInEvent::ReplicationError => { + HandlerInEvent::CriticalError => { self.critical_error = true; } } diff --git a/aquadoggo/src/network/replication/mod.rs b/aquadoggo/src/network/peers/mod.rs similarity index 100% rename from aquadoggo/src/network/replication/mod.rs rename to aquadoggo/src/network/peers/mod.rs diff --git a/aquadoggo/src/network/replication/protocol.rs b/aquadoggo/src/network/peers/protocol.rs similarity index 100% rename from aquadoggo/src/network/replication/protocol.rs rename to aquadoggo/src/network/peers/protocol.rs diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 0b539729d..f4d399e4f 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -15,7 +15,7 @@ use crate::context::Context; use crate::manager::{ServiceReadySender, Shutdown}; use crate::network::behaviour::{Behaviour, BehaviourEvent}; use crate::network::config::NODE_NAMESPACE; -use crate::network::replication; +use crate::network::peers; use crate::network::swarm; use crate::network::NetworkConfiguration; @@ -162,11 +162,11 @@ impl EventLoop { ServiceMessage::SentReplicationMessage(peer_id, sync_message) => { self.swarm .behaviour_mut() - .replication + .peers .send_message(peer_id, sync_message); } ServiceMessage::ReplicationFailed(peer_id) => { - self.swarm.behaviour_mut().replication.handle_error(peer_id); + self.swarm.behaviour_mut().peers.handle_error(peer_id); } _ => (), } @@ -426,18 +426,18 @@ impl EventLoop { debug!("Unhandled connection limit event: {event:?}") } - // ~~~~~~~~~~~ - // Replication - // ~~~~~~~~~~~ - SwarmEvent::Behaviour(BehaviourEvent::Replication(event)) => match event { - replication::Event::MessageReceived(peer_id, message) => self.send_service_message( + // ~~~~~~~~~~~~~ + // p2panda peers + // ~~~~~~~~~~~~~ + SwarmEvent::Behaviour(BehaviourEvent::Peers(event)) => match event { + peers::Event::MessageReceived(peer_id, message) => self.send_service_message( ServiceMessage::ReceivedReplicationMessage(peer_id, message), ), - replication::Event::PeerConnected(peer_id) => { + peers::Event::PeerConnected(peer_id) => { // Inform other services about new connection self.send_service_message(ServiceMessage::PeerConnected(peer_id)); } - replication::Event::PeerDisconnected(peer_id) => { + peers::Event::PeerDisconnected(peer_id) => { // Inform other services about closed connection self.send_service_message(ServiceMessage::PeerDisconnected(peer_id)); } From 94378ee0497680269798a2ddfffae38968a40502 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:18:14 +0200 Subject: [PATCH 112/126] Make clippy happy --- aquadoggo/src/network/peers/behaviour.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/network/peers/behaviour.rs b/aquadoggo/src/network/peers/behaviour.rs index 6ae76ed00..4fc267736 100644 --- a/aquadoggo/src/network/peers/behaviour.rs +++ b/aquadoggo/src/network/peers/behaviour.rs @@ -66,8 +66,8 @@ impl Behaviour { fn on_connection_closed(&mut self, peer_id: PeerId, connection_id: ConnectionId) { if let Some(connections) = self.peers.get(&peer_id) { let filtered: Vec = connections - .to_owned() - .into_iter() + .iter() + .copied() .filter(|connection| connection != &connection_id) .collect(); From 45a597660ecd4a4ef846410549f0210e725353e6 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 16 Jun 2023 21:19:38 +0200 
Subject: [PATCH 113/126] Add entry to CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc4c46f82..8fc524f6b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Replication protocol session manager [#363](https://github.com/p2panda/aquadoggo/pull/363)
 - Replication message de- / serialization [#375](https://github.com/p2panda/aquadoggo/pull/375)
 - Naive protocol replication [#380](https://github.com/p2panda/aquadoggo/pull/380)
+- Integrate replication manager with networking stack [#387](https://github.com/p2panda/aquadoggo/pull/387) 🥞

 ### Changed

From a2c28da311943708901bfcb15d6de43211118dbb Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 18 Jun 2023 12:57:26 +0200
Subject: [PATCH 114/126] Use connection ids to identify peers

---
 aquadoggo/src/bus.rs | 12 +-
 .../scalars/document_view_id_scalar.rs | 3 +-
 aquadoggo/src/network/mod.rs | 1 +
 aquadoggo/src/network/peers/behaviour.rs | 104 ++++++------------
 aquadoggo/src/network/peers/mod.rs | 2 +
 aquadoggo/src/network/peers/peer.rs | 46 ++++++++
 aquadoggo/src/network/service.rs | 29 ++---
 aquadoggo/src/replication/service.rs | 94 ++++++++--------
 8 files changed, 153 insertions(+), 138 deletions(-)
 create mode 100644 aquadoggo/src/network/peers/peer.rs

diff --git a/aquadoggo/src/bus.rs b/aquadoggo/src/bus.rs
index bed340daf..addc9402c 100644
--- a/aquadoggo/src/bus.rs
+++ b/aquadoggo/src/bus.rs
@@ -1,9 +1,9 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later

-use libp2p::PeerId;
 use p2panda_rs::operation::OperationId;

 use crate::manager::Sender;
+use crate::network::Peer;
 use crate::replication::SyncMessage;

 /// Sender for cross-service communication bus.
@@ -16,17 +16,17 @@ pub enum ServiceMessage {
 NewOperation(OperationId),

 /// Node established a bi-directional connection to another node.
- PeerConnected(PeerId),
+ PeerConnected(Peer),

 /// Node closed a connection to another node.
- PeerDisconnected(PeerId),
+ PeerDisconnected(Peer),

 /// Node sent a message to a remote node for replication.
- SentReplicationMessage(PeerId, SyncMessage),
+ SentReplicationMessage(Peer, SyncMessage),

 /// Node received a message from a remote node for replication.
- ReceivedReplicationMessage(PeerId, SyncMessage),
+ ReceivedReplicationMessage(Peer, SyncMessage),

 /// Replication protocol failed with a critical error.
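The peer-related variants above now carry the connection-scoped `Peer` instead of a bare `PeerId`, so services subscribing to the bus can key their state per connection rather than per node. A minimal sketch of such a consumer, assuming a `tokio::sync::broadcast` channel of `ServiceMessage` as used by the service manager and that `ServiceMessage` is `Clone`; the handler bodies are illustrative only:

use tokio::sync::broadcast;

// Hedged sketch: react to connection-scoped peer events arriving on the bus
async fn consume_bus(mut rx: broadcast::Receiver<ServiceMessage>) {
    while let Ok(message) = rx.recv().await {
        match message {
            ServiceMessage::PeerConnected(_peer) => {
                // A replication service could initiate sessions here
            }
            ServiceMessage::PeerDisconnected(_peer) => {
                // ... and drop all state held for that connection here
            }
            _ => (),
        }
    }
}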
- ReplicationFailed(PeerId), + ReplicationFailed(Peer), } diff --git a/aquadoggo/src/graphql/scalars/document_view_id_scalar.rs b/aquadoggo/src/graphql/scalars/document_view_id_scalar.rs index 582dbedf1..ef522b908 100644 --- a/aquadoggo/src/graphql/scalars/document_view_id_scalar.rs +++ b/aquadoggo/src/graphql/scalars/document_view_id_scalar.rs @@ -1,6 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::{fmt::Display, str::FromStr}; +use std::fmt::Display; +use std::str::FromStr; use dynamic_graphql::{Error, Result, Scalar, ScalarValue, Value}; use p2panda_rs::document::DocumentViewId; diff --git a/aquadoggo/src/network/mod.rs b/aquadoggo/src/network/mod.rs index 5c2b14fbb..907f2a3a6 100644 --- a/aquadoggo/src/network/mod.rs +++ b/aquadoggo/src/network/mod.rs @@ -9,4 +9,5 @@ mod swarm; mod transport; pub use config::NetworkConfiguration; +pub use peers::Peer; pub use service::network_service; diff --git a/aquadoggo/src/network/peers/behaviour.rs b/aquadoggo/src/network/peers/behaviour.rs index 4fc267736..443b4d2cd 100644 --- a/aquadoggo/src/network/peers/behaviour.rs +++ b/aquadoggo/src/network/peers/behaviour.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::collections::{HashMap, VecDeque}; +use std::collections::VecDeque; use std::task::{Context, Poll}; use libp2p::core::Endpoint; @@ -14,121 +14,81 @@ use log::{info, trace}; use p2panda_rs::Human; use crate::network::peers::handler::{Handler, HandlerInEvent, HandlerOutEvent}; +use crate::network::peers::Peer; use crate::replication::SyncMessage; #[derive(Debug)] pub enum Event { /// Message received on the inbound stream. - MessageReceived(PeerId, SyncMessage), + MessageReceived(Peer, SyncMessage), /// We established an inbound or outbound connection to a peer for the first time. - PeerConnected(PeerId), + PeerConnected(Peer), /// Peer does not have any inbound or outbound connections left with us. 
- PeerDisconnected(PeerId), + PeerDisconnected(Peer), } #[derive(Debug)] pub struct Behaviour { events: VecDeque>, - peers: HashMap>, } impl Behaviour { pub fn new() -> Self { Self { events: VecDeque::new(), - peers: HashMap::new(), } } fn on_connection_established(&mut self, peer_id: PeerId, connection_id: ConnectionId) { - // Register latest connection at the end of our list for peer - let connections = match self.peers.get(&peer_id) { - Some(vec) => { - let mut vec = vec.clone(); - vec.push(connection_id); - vec - } - None => { - // Inform other services about new peer when we see it for the first time - info!("Connected to new peer {peer_id}"); - self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer_id))); - - vec![connection_id] - } - }; - - self.peers.insert(peer_id, connections); + let peer = Peer::new(peer_id, connection_id); + info!("New peer connected: {peer}"); + self.events + .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer))); } fn on_connection_closed(&mut self, peer_id: PeerId, connection_id: ConnectionId) { - if let Some(connections) = self.peers.get(&peer_id) { - let filtered: Vec = connections - .iter() - .copied() - .filter(|connection| connection != &connection_id) - .collect(); - - if filtered.is_empty() { - info!("Disconnected from peer {peer_id}"); - self.events - .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer_id))); - - self.peers.remove(&peer_id); - } else { - self.peers.insert(peer_id, filtered); - } - } else { - panic!("Tried to close connection of inexistent peer"); - } + let peer = Peer::new(peer_id, connection_id); + info!("Peer disconnected: {peer}"); + self.events + .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer))); } - fn handle_received_message(&mut self, peer_id: &PeerId, message: SyncMessage) { + fn on_received_message( + &mut self, + peer_id: PeerId, + connection_id: ConnectionId, + message: SyncMessage, + ) { + let peer = Peer::new(peer_id, connection_id); trace!( - "Notify swarm of received sync message: {peer_id} {}", + "Notify swarm of received sync message: {peer} {}", message.display() ); self.events .push_back(ToSwarm::GenerateEvent(Event::MessageReceived( - *peer_id, message, + peer, message, ))); } - pub fn send_message(&mut self, peer_id: PeerId, message: SyncMessage) { - let connection_id = self - .peers - .get(&peer_id) - .expect("Tried to handle error for unknown peer") - .last() - .expect("Tried to handle error for peer with inexistent connections"); - + pub fn send_message(&mut self, peer: Peer, message: SyncMessage) { trace!( - "Notify handler of sent sync message: {peer_id} {}", + "Notify handler of sent sync message: {peer} {}", message.display() ); - self.events.push_back(ToSwarm::NotifyHandler { - peer_id, + peer_id: peer.id(), event: HandlerInEvent::Message(message), - handler: NotifyHandler::One(connection_id.to_owned()), + handler: NotifyHandler::One(peer.connection_id()), }); } - /// React to errors from other services (for example replication service). 
- pub fn handle_error(&mut self, peer_id: PeerId) { - let connection_id = self - .peers - .get(&peer_id) - .expect("Tried to handle error for unknown peer") - .last() - .expect("Tried to handle error for peer with inexistent connections"); - + pub fn handle_critical_error(&mut self, peer: Peer) { self.events.push_back(ToSwarm::NotifyHandler { - peer_id, + peer_id: peer.id(), event: HandlerInEvent::CriticalError, - handler: NotifyHandler::One(connection_id.to_owned()), + handler: NotifyHandler::One(peer.connection_id()), }); } } @@ -160,13 +120,13 @@ impl NetworkBehaviour for Behaviour { fn on_connection_handler_event( &mut self, - peer: PeerId, - _connection_id: ConnectionId, + peer_id: PeerId, + connection_id: ConnectionId, handler_event: THandlerOutEvent, ) { match handler_event { HandlerOutEvent::Message(message) => { - self.handle_received_message(&peer, message); + self.on_received_message(peer_id, connection_id, message); } } } diff --git a/aquadoggo/src/network/peers/mod.rs b/aquadoggo/src/network/peers/mod.rs index 1962e64d4..579c75f3c 100644 --- a/aquadoggo/src/network/peers/mod.rs +++ b/aquadoggo/src/network/peers/mod.rs @@ -2,8 +2,10 @@ mod behaviour; mod handler; +mod peer; mod protocol; pub use behaviour::{Behaviour, Event}; pub use handler::Handler; +pub use peer::Peer; pub use protocol::{Codec, CodecError, Protocol, PROTOCOL_NAME}; diff --git a/aquadoggo/src/network/peers/peer.rs b/aquadoggo/src/network/peers/peer.rs new file mode 100644 index 000000000..e62dbfc16 --- /dev/null +++ b/aquadoggo/src/network/peers/peer.rs @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use std::cmp::Ordering; +use std::fmt::{Display, Formatter, Result}; + +use libp2p::swarm::ConnectionId; +use libp2p::PeerId; + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub struct Peer(PeerId, ConnectionId); + +impl Peer { + pub fn new(peer_id: PeerId, connection_id: ConnectionId) -> Self { + Self(peer_id, connection_id) + } + + pub fn new_local_peer(local_peer_id: PeerId) -> Self { + Self(local_peer_id, ConnectionId::new_unchecked(0)) + } + + pub fn id(&self) -> PeerId { + self.0 + } + + pub fn connection_id(&self) -> ConnectionId { + self.1 + } +} + +impl Ord for Peer { + fn cmp(&self, other: &Self) -> Ordering { + self.0.cmp(&other.0) + } +} + +impl PartialOrd for Peer { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.0.cmp(&other.0)) + } +} + +impl Display for Peer { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + write!(f, "Peer({}, {:?})", self.0, self.1) + } +} diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index f4d399e4f..35fe2e224 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -159,14 +159,14 @@ impl EventLoop { /// Handle an incoming message via the communication bus from other services. 
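The new `Peer` is deliberately a small value type: it derives `Copy`, `Eq` and `Hash`, so a (peer id, connection id) pair can be used directly as a map key. A short sketch of the property this buys, assuming the `Peer` type added above is in scope; the session map is illustrative:

use std::collections::HashMap;

use libp2p::swarm::ConnectionId;
use libp2p::PeerId;

fn main() {
    let peer_id = PeerId::random();

    // Two simultaneous connections to the same remote node
    let connection_a = Peer::new(peer_id, ConnectionId::new_unchecked(1));
    let connection_b = Peer::new(peer_id, ConnectionId::new_unchecked(2));

    // Illustrative map of per-connection session state
    let mut sessions: HashMap<Peer, Vec<u64>> = HashMap::new();
    sessions.insert(connection_a, vec![0]);
    sessions.insert(connection_b, vec![1]);

    // Same `PeerId`, two distinct keys: per-connection state never collides
    assert_eq!(sessions.len(), 2);
}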
async fn handle_service_message(&mut self, message: ServiceMessage) {
 match message {
- ServiceMessage::SentReplicationMessage(peer_id, sync_message) => {
+ ServiceMessage::SentReplicationMessage(peer, sync_message) => {
 self.swarm
 .behaviour_mut()
 .peers
- .send_message(peer_id, sync_message);
+ .send_message(peer, sync_message);
 }
- ServiceMessage::ReplicationFailed(peer_id) => {
- self.swarm.behaviour_mut().peers.handle_error(peer_id);
+ ServiceMessage::ReplicationFailed(peer) => {
+ self.swarm.behaviour_mut().peers.handle_critical_error(peer);
 }
 _ => (),
 }
@@ -430,16 +430,19 @@ impl EventLoop {
 // p2panda peers
 // ~~~~~~~~~~~~~
 SwarmEvent::Behaviour(BehaviourEvent::Peers(event)) => match event {
- peers::Event::MessageReceived(peer_id, message) => self.send_service_message(
- ServiceMessage::ReceivedReplicationMessage(peer_id, message),
- ),
- peers::Event::PeerConnected(peer_id) => {
- // Inform other services about new connection
- self.send_service_message(ServiceMessage::PeerConnected(peer_id));
+ peers::Event::PeerConnected(peer) => {
+ // Inform other services about new peer
+ self.send_service_message(ServiceMessage::PeerConnected(peer));
 }
- peers::Event::PeerDisconnected(peer_id) => {
- // Inform other services about closed connection
- self.send_service_message(ServiceMessage::PeerDisconnected(peer_id));
+ peers::Event::PeerDisconnected(peer) => {
+ // Inform other services about peer leaving
+ self.send_service_message(ServiceMessage::PeerDisconnected(peer));
+ }
+ peers::Event::MessageReceived(peer, message) => {
+ // Inform other services about received messages from peer
+ self.send_service_message(ServiceMessage::ReceivedReplicationMessage(
+ peer, message,
+ ))
 }
 },

diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs
index db8db12b2..e5406b15d 100644
--- a/aquadoggo/src/replication/service.rs
+++ b/aquadoggo/src/replication/service.rs
@@ -16,6 +16,7 @@ use crate::bus::{ServiceMessage, ServiceSender};
 use crate::context::Context;
 use crate::db::SqlStore;
 use crate::manager::{ServiceReadySender, Shutdown};
+use crate::network::Peer;
 use crate::replication::errors::ReplicationError;
 use crate::replication::{
 Mode, Session, SessionId, SyncIngest, SyncManager, SyncMessage, TargetSet,
@@ -60,15 +61,15 @@ pub async fn replication_service(

 #[derive(Debug, Clone, PartialEq, Eq)]
 struct PeerStatus {
- peer_id: PeerId,
+ peer: Peer,
 successful_count: usize,
 failed_count: usize,
 }

 impl PeerStatus {
- pub fn new(peer_id: &PeerId) -> Self {
+ pub fn new(peer: Peer) -> Self {
 Self {
- peer_id: *peer_id,
+ peer,
 successful_count: 0,
 failed_count: 0,
 }
 }

 struct ConnectionManager {
 /// List of peers the connection manager knows about and which are available for replication.
- peers: HashMap<PeerId, PeerStatus>,
+ peers: HashMap<Peer, PeerStatus>,

 /// Replication state manager, data ingest and message generator for handling all replication
 /// logic.
- sync_manager: SyncManager<PeerId>,
+ sync_manager: SyncManager<Peer>,

 /// Async stream giving us a regular interval to initiate new replication sessions.
scheduler: IntervalStream, @@ -103,8 +104,9 @@ impl ConnectionManager { tx: &ServiceSender, local_peer_id: PeerId, ) -> Self { + let local_peer = Peer::new_local_peer(local_peer_id); let ingest = SyncIngest::new(schema_provider.clone(), tx.clone()); - let sync_manager = SyncManager::new(store.clone(), ingest, local_peer_id); + let sync_manager = SyncManager::new(store.clone(), ingest, local_peer); let scheduler = IntervalStream::new(interval(UPDATE_INTERVAL)); Self { @@ -129,60 +131,60 @@ impl ConnectionManager { TargetSet::new(&supported_schema_ids) } - fn remove_connection(&mut self, peer_id: PeerId) { - match self.peers.remove(&peer_id) { - Some(_) => debug!("Remove peer: {peer_id}"), + fn remove_connection(&mut self, peer: Peer) { + match self.peers.remove(&peer) { + Some(_) => debug!("Remove peer: {peer}"), None => warn!("Tried to remove connection from unknown peer"), } } - async fn on_connection_established(&mut self, peer_id: PeerId) { - info!("Connected to peer: {peer_id}"); + async fn on_connection_established(&mut self, peer: Peer) { + info!("Connected to peer: {peer}"); - match self.peers.get(&peer_id) { + match self.peers.get(&peer) { Some(_) => { - warn!("Peer already known: {peer_id}"); + warn!("Peer already known: {peer}"); } None => { - self.peers.insert(peer_id, PeerStatus::new(&peer_id)); + self.peers.insert(peer, PeerStatus::new(peer)); self.update_sessions().await; } } } - async fn on_connection_closed(&mut self, peer_id: PeerId) { - info!("Disconnected from peer: {peer_id}"); + async fn on_connection_closed(&mut self, peer: Peer) { + info!("Disconnected from peer: {peer}"); // Clear running replication sessions from sync manager - self.sync_manager.remove_sessions(&peer_id); - self.remove_connection(peer_id) + self.sync_manager.remove_sessions(&peer); + self.remove_connection(peer) } - async fn on_replication_message(&mut self, peer_id: PeerId, message: SyncMessage) { + async fn on_replication_message(&mut self, peer: Peer, message: SyncMessage) { let session_id = message.session_id(); - match self.sync_manager.handle_message(&peer_id, &message).await { + match self.sync_manager.handle_message(&peer, &message).await { Ok(result) => { for message in result.messages { self.send_service_message(ServiceMessage::SentReplicationMessage( - peer_id, message, + peer, message, )); } if result.is_done { - self.on_replication_finished(peer_id, session_id).await; + self.on_replication_finished(peer, session_id).await; } } Err(err) => { - self.on_replication_error(peer_id, session_id, err).await; + self.on_replication_error(peer, session_id, err).await; } } } - async fn on_replication_finished(&mut self, peer_id: PeerId, _session_id: SessionId) { - info!("Finished replication with peer {}", peer_id); + async fn on_replication_finished(&mut self, peer: Peer, _session_id: SessionId) { + info!("Finished replication with peer {peer}"); - match self.peers.get_mut(&peer_id) { + match self.peers.get_mut(&peer) { Some(status) => { status.successful_count += 1; } @@ -194,13 +196,13 @@ impl ConnectionManager { async fn on_replication_error( &mut self, - peer_id: PeerId, + peer: Peer, session_id: SessionId, error: ReplicationError, ) { - warn!("Replication with peer {} failed: {}", peer_id, error); + warn!("Replication with peer {} failed: {}", peer, error); - match self.peers.get_mut(&peer_id) { + match self.peers.get_mut(&peer) { Some(status) => { status.failed_count += 1; } @@ -209,22 +211,22 @@ impl ConnectionManager { } } - self.sync_manager.remove_session(&peer_id, &session_id); + 
self.sync_manager.remove_session(&peer, &session_id);

 // Inform network service about the error, so it can react accordingly
- self.send_service_message(ServiceMessage::ReplicationFailed(peer_id));
+ self.send_service_message(ServiceMessage::ReplicationFailed(peer));
 }

 async fn handle_service_message(&mut self, message: ServiceMessage) {
 match message {
- ServiceMessage::PeerConnected(peer_id) => {
- self.on_connection_established(peer_id).await;
+ ServiceMessage::PeerConnected(peer) => {
+ self.on_connection_established(peer).await;
 }
- ServiceMessage::PeerDisconnected(peer_id) => {
- self.on_connection_closed(peer_id).await;
+ ServiceMessage::PeerDisconnected(peer) => {
+ self.on_connection_closed(peer).await;
 }
- ServiceMessage::ReceivedReplicationMessage(peer_id, message) => {
- self.on_replication_message(peer_id, message).await;
+ ServiceMessage::ReceivedReplicationMessage(peer, message) => {
+ self.on_replication_message(peer, message).await;
 }
 _ => (), // Ignore all other messages
 }
@@ -242,12 +244,12 @@ impl ConnectionManager {
 let target_set = self.target_set().await;

 // Iterate through all currently connected peers
- let attempt_peers: Vec<PeerId> = self
+ let attempt_peers: Vec<Peer> = self
 .peers
 .clone()
 .into_iter()
- .filter_map(|(peer_id, _)| {
- let sessions = self.sync_manager.get_sessions(&peer_id);
+ .filter_map(|(peer, _)| {
+ let sessions = self.sync_manager.get_sessions(&peer);

 // 1. Check if we're running too many sessions with that peer on this connection
 // already. This limit is configurable.
@@ -263,7 +265,7 @@ impl ConnectionManager {
 .any(|session| session.target_set() == target_set);

 if active_sessions.len() < MAX_SESSIONS_PER_PEER && !has_active_target_set_session {
- Some(peer_id)
+ Some(peer)
 } else {
 None
 }
@@ -274,21 +276,21 @@ impl ConnectionManager {
 debug!("No peers available for replication")
 }

- for peer_id in attempt_peers {
- self.initiate_replication(&peer_id, &target_set).await;
+ for peer in attempt_peers {
+ self.initiate_replication(&peer, &target_set).await;
 }
 }

- async fn initiate_replication(&mut self, peer_id: &PeerId, target_set: &TargetSet) {
+ async fn initiate_replication(&mut self, peer: &Peer, target_set: &TargetSet) {
 match self
 .sync_manager
- .initiate_session(peer_id, target_set, &Mode::Naive)
+ .initiate_session(peer, target_set, &Mode::Naive)
 .await
 {
 Ok(messages) => {
 for message in messages {
 self.send_service_message(ServiceMessage::SentReplicationMessage(
- *peer_id, message,
+ *peer, message,
 ));
 }
 }

From 158ca763a251b236ee5e04b211c84b44b2908e17 Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 18 Jun 2023 13:38:28 +0200
Subject: [PATCH 115/126] Clean up logging a little bit

---
 aquadoggo/src/graphql/schema.rs | 2 +-
 aquadoggo/src/network/peers/behaviour.rs | 12 ++--
 aquadoggo/src/network/peers/peer.rs | 10 ++--
 aquadoggo/src/network/service.rs | 70 +++++++++++++++---------
 aquadoggo/src/network/swarm.rs | 2 +-
 aquadoggo/src/replication/manager.rs | 42 +++++++++-----
 aquadoggo/src/replication/service.rs | 17 +++---
 7 files changed, 94 insertions(+), 61 deletions(-)

diff --git a/aquadoggo/src/graphql/schema.rs b/aquadoggo/src/graphql/schema.rs
index bf33b2fba..6e66d9148 100644
--- a/aquadoggo/src/graphql/schema.rs
+++ b/aquadoggo/src/graphql/schema.rs
@@ -194,7 +194,7 @@ impl GraphQLSchemaManager {
 let shared = self.shared.clone();
 let schemas = self.schemas.clone();

- info!("Subscribing GraphQL manager to schema provider");
+ debug!("Subscribing GraphQL manager to schema provider");

 let mut on_schema_added =
shared.schema_provider.on_schema_added(); // Create the new GraphQL based on the current state of known p2panda application schemas diff --git a/aquadoggo/src/network/peers/behaviour.rs b/aquadoggo/src/network/peers/behaviour.rs index 443b4d2cd..790e37ade 100644 --- a/aquadoggo/src/network/peers/behaviour.rs +++ b/aquadoggo/src/network/peers/behaviour.rs @@ -10,7 +10,7 @@ use libp2p::swarm::{ PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use libp2p::{Multiaddr, PeerId}; -use log::{info, trace}; +use log::trace; use p2panda_rs::Human; use crate::network::peers::handler::{Handler, HandlerInEvent, HandlerOutEvent}; @@ -43,14 +43,12 @@ impl Behaviour { fn on_connection_established(&mut self, peer_id: PeerId, connection_id: ConnectionId) { let peer = Peer::new(peer_id, connection_id); - info!("New peer connected: {peer}"); self.events .push_back(ToSwarm::GenerateEvent(Event::PeerConnected(peer))); } fn on_connection_closed(&mut self, peer_id: PeerId, connection_id: ConnectionId) { let peer = Peer::new(peer_id, connection_id); - info!("Peer disconnected: {peer}"); self.events .push_back(ToSwarm::GenerateEvent(Event::PeerDisconnected(peer))); } @@ -63,7 +61,8 @@ impl Behaviour { ) { let peer = Peer::new(peer_id, connection_id); trace!( - "Notify swarm of received sync message: {peer} {}", + "Notify swarm of received sync message: {} {}", + peer.display(), message.display() ); self.events @@ -74,8 +73,9 @@ impl Behaviour { pub fn send_message(&mut self, peer: Peer, message: SyncMessage) { trace!( - "Notify handler of sent sync message: {peer} {}", - message.display() + "Notify handler of sent sync message: {} {}", + peer.display(), + message.display(), ); self.events.push_back(ToSwarm::NotifyHandler { peer_id: peer.id(), diff --git a/aquadoggo/src/network/peers/peer.rs b/aquadoggo/src/network/peers/peer.rs index e62dbfc16..9fc55f8e0 100644 --- a/aquadoggo/src/network/peers/peer.rs +++ b/aquadoggo/src/network/peers/peer.rs @@ -1,10 +1,10 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use std::cmp::Ordering; -use std::fmt::{Display, Formatter, Result}; use libp2p::swarm::ConnectionId; use libp2p::PeerId; +use p2panda_rs::Human; #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] pub struct Peer(PeerId, ConnectionId); @@ -39,8 +39,10 @@ impl PartialOrd for Peer { } } -impl Display for Peer { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - write!(f, "Peer({}, {:?})", self.0, self.1) +impl Human for Peer { + fn display(&self) -> String { + // Trick to nicely display `ConnectionId` struct + let connection_id = &format!("{:?}", self.1)[13..][..1]; + format!("{} ({})", self.0, connection_id) } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 35fe2e224..802c090b1 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -3,9 +3,9 @@ use anyhow::Result; use libp2p::multiaddr::Protocol; use libp2p::ping::Event; -use libp2p::swarm::{AddressScore, SwarmEvent}; +use libp2p::swarm::{AddressScore, ConnectionError, SwarmEvent}; use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr, Swarm}; -use log::{debug, info, trace, warn}; +use log::{debug, trace, warn}; use tokio::task; use tokio_stream::wrappers::BroadcastStream; use tokio_stream::StreamExt; @@ -87,8 +87,6 @@ pub async fn network_service( let event_loop = EventLoop::new(swarm, tx, external_circuit_addr, network_config); let handle = task::spawn(event_loop.run()); - info!("Network service is ready"); - if tx_ready.send(()).is_err() { warn!("No 
subscriber informed about network service being ready"); }; @@ -181,13 +179,13 @@ impl EventLoop { // ~~~~~ // Swarm // ~~~~~ - SwarmEvent::Dialing(peer_id) => info!("Dialing: {peer_id}"), + SwarmEvent::Dialing(peer_id) => trace!("Dialing: {peer_id}"), SwarmEvent::ConnectionEstablished { peer_id, num_established, .. } => { - info!("Established new connection (total {num_established}) with {peer_id}"); + trace!("Established new connection (total {num_established}) with {peer_id}"); // Match on a connection with the rendezvous server if let Some(rendezvous_peer_id) = self.network_config.rendezvous_peer_id { @@ -207,44 +205,62 @@ impl EventLoop { } } } - SwarmEvent::ConnectionClosed { - peer_id, - num_established, - cause, - .. - } => { - info!("Connection closed (total {num_established}) with {peer_id}: {cause:?}"); - } + SwarmEvent::ConnectionClosed { peer_id, cause, .. } => match cause { + Some(ConnectionError::IO(error)) => { + if error.to_string() == "timed out" { + // Sometimes we receive time out errors from here + debug!("Connection timed out with peer {peer_id}"); + } else { + warn!("Connection error occurred with peer {peer_id}: {error}"); + } + } + Some(ConnectionError::KeepAliveTimeout) => { + debug!("Connection timed out with peer {peer_id}"); + } + Some(ConnectionError::Handler(_)) => { + warn!("Connection handler error occurred with peer {peer_id}"); + } + None => { + debug!("Connection closed with peer {peer_id}"); + } + }, SwarmEvent::ExpiredListenAddr { listener_id, address, - } => trace!("ExpiredListenAddr: {listener_id:?} {address}"), + } => trace!("Expired listen address: {listener_id:?} {address}"), SwarmEvent::IncomingConnection { local_addr, send_back_addr, - } => debug!("IncomingConnection: {local_addr} {send_back_addr}"), + } => trace!("Incoming connection: {local_addr} {send_back_addr}"), SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error, - } => warn!("IncomingConnectionError: {local_addr} {send_back_addr} {error:?}"), + } => { + warn!("Incoming connection error occurred with {local_addr} and {send_back_addr}: {error}"); + } SwarmEvent::ListenerClosed { listener_id, addresses, reason, - } => trace!("ListenerClosed: {listener_id:?} {addresses:?} {reason:?}"), - SwarmEvent::ListenerError { listener_id, error } => { - warn!("ListenerError: {listener_id:?} {error:?}") + } => trace!("Listener closed: {listener_id:?} {addresses:?} {reason:?}"), + SwarmEvent::ListenerError { error, .. 
} => { + warn!("Listener failed with error: {error}") } SwarmEvent::NewListenAddr { address, listener_id: _, } => { - info!("Listening on {address}"); - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - warn!("OutgoingConnectionError: {peer_id:?} {error:?}"); + debug!("Listening on {address}"); } + SwarmEvent::OutgoingConnectionError { peer_id, error } => match peer_id { + Some(id) => { + warn!("Outgoing connection error with peer {id} occurred: {error}"); + } + None => { + warn!("Outgoing connection error occurred: {error}"); + } + }, // ~~~~ // mDNS @@ -273,7 +289,7 @@ impl EventLoop { // Ping // ~~~~ SwarmEvent::Behaviour(BehaviourEvent::Ping(Event { peer, result: _ })) => { - debug!("Ping from: {peer}") + trace!("Ping from: {peer}") } // ~~~~~~~~~~ @@ -381,10 +397,10 @@ impl EventLoop { // Relay // ~~~~~ SwarmEvent::Behaviour(BehaviourEvent::RelayServer(event)) => { - debug!("Unhandled relay server event: {event:?}") + trace!("Unhandled relay server event: {event:?}") } SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { - debug!("Unhandled relay client event: {event:?}") + trace!("Unhandled relay client event: {event:?}") } // ~~~~~~~ diff --git a/aquadoggo/src/network/swarm.rs b/aquadoggo/src/network/swarm.rs index 2a31698ed..ad17d65e6 100644 --- a/aquadoggo/src/network/swarm.rs +++ b/aquadoggo/src/network/swarm.rs @@ -17,7 +17,7 @@ pub async fn build_swarm( key_pair: Keypair, ) -> Result> { let peer_id = network_config.peer_id.expect("Peer id needs to be given"); - info!("Network service peer ID: {peer_id}",); + info!("Local peer id: {peer_id}"); let relay_client_enabled = network_config.relay_address.is_some(); diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 2edf8fc4a..e4d073d75 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -1,6 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use std::collections::HashMap; +use std::hash::Hash; use anyhow::Result; use log::{debug, info, trace, warn}; @@ -52,7 +53,7 @@ pub struct SyncManager
<P> {

impl<P> SyncManager<P>
where - P: Clone + std::fmt::Debug + std::hash::Hash + Eq + PartialOrd, + P: Clone + Human + Hash + Eq + PartialOrd, { pub fn new(store: SqlStore, ingest: SyncIngest, local_peer: P) -> Self { Self { @@ -68,7 +69,7 @@ where /// Warning: This might also remove actively running sessions. Do only clear sessions when you /// are sure they are a) done or b) the peer closed its connection. pub fn remove_sessions(&mut self, remote_peer: &P) { - debug!("Remove all sessions with peer: {remote_peer:?}"); + debug!("Remove all sessions with peer: {}", remote_peer.display()); self.sessions.remove(remote_peer); } @@ -129,13 +130,24 @@ where .enumerate() .find(|(_, session)| session.id == *session_id) { - debug!("Remove session {session_id} with peer: {remote_peer:?}"); + debug!( + "Remove session {} with peer: {}", + session_id, + remote_peer.display() + ); sessions.remove(index); } else { - warn!("Tried to remove nonexistent session {session_id} with peer: {remote_peer:?}") + warn!( + "Tried to remove nonexistent session {} with peer: {}", + session_id, + remote_peer.display() + ); } } else { - warn!("Tried to remove sessions from unknown peer: {remote_peer:?}") + warn!( + "Tried to remove sessions from unknown peer: {}", + remote_peer.display() + ); } } @@ -160,8 +172,8 @@ where let sessions = self.get_sessions(remote_peer); info!( - "Initiate outbound replication session with peer {:?}", - remote_peer + "Initiate outbound replication session with peer {}", + remote_peer.display() ); // Make sure to not have duplicate sessions over the same schema ids @@ -376,8 +388,8 @@ where }; info!( - "Accept inbound replication session with peer {:?}", - remote_peer + "Accept inbound replication session with peer {}", + remote_peer.display() ); let messages = self @@ -394,8 +406,10 @@ where message: &Message, ) -> Result { trace!( - "Message received: {session_id} {remote_peer:?} {}", - message.display() + "Message received: {} {} {}", + session_id, + remote_peer.display(), + message.display(), ); let sessions = self.sessions.get_mut(remote_peer); @@ -414,11 +428,11 @@ where } else { Err(ReplicationError::NoSessionFound( *session_id, - format!("{remote_peer:?}"), + remote_peer.display(), )) } } - None => Err(ReplicationError::NoPeerFound(format!("{remote_peer:?}"))), + None => Err(ReplicationError::NoPeerFound(remote_peer.display())), }?; // We're done, clean up after ourselves @@ -466,7 +480,7 @@ where } else { Err(ReplicationError::NoSessionFound( *session_id, - format!("{remote_peer:?}"), + remote_peer.display(), )) } } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index e5406b15d..87c3f90d4 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -5,8 +5,9 @@ use std::time::Duration; use anyhow::Result; use libp2p::PeerId; -use log::{debug, info, warn}; +use log::{debug, info, trace, warn}; use p2panda_rs::schema::SchemaId; +use p2panda_rs::Human; use tokio::task; use tokio::time::interval; use tokio_stream::wrappers::{BroadcastStream, IntervalStream}; @@ -133,17 +134,17 @@ impl ConnectionManager { fn remove_connection(&mut self, peer: Peer) { match self.peers.remove(&peer) { - Some(_) => debug!("Remove peer: {peer}"), + Some(_) => debug!("Remove peer: {}", peer.display()), None => warn!("Tried to remove connection from unknown peer"), } } async fn on_connection_established(&mut self, peer: Peer) { - info!("Connected to peer: {peer}"); + info!("Connected to peer: {}", peer.display()); match self.peers.get(&peer) { Some(_) => { - 
warn!("Peer already known: {peer}");
+ warn!("Peer already known: {}", peer.display());
 }
 None => {
 self.peers.insert(peer, PeerStatus::new(peer));
@@ -153,7 +154,7 @@ impl ConnectionManager {
 }

 async fn on_connection_closed(&mut self, peer: Peer) {
- info!("Disconnected from peer: {peer}");
+ info!("Disconnected from peer: {}", peer.display());

 // Clear running replication sessions from sync manager
 self.sync_manager.remove_sessions(&peer);
@@ -182,7 +183,7 @@ impl ConnectionManager {
 }

 async fn on_replication_finished(&mut self, peer: Peer, _session_id: SessionId) {
- info!("Finished replication with peer {peer}");
+ info!("Finished replication with peer {}", peer.display());

 match self.peers.get_mut(&peer) {
 Some(status) => {
@@ -200,7 +201,7 @@ impl ConnectionManager {
 session_id: SessionId,
 error: ReplicationError,
 ) {
- warn!("Replication with peer {} failed: {}", peer, error);
+ warn!("Replication with peer {} failed: {}", peer.display(), error);

 match self.peers.get_mut(&peer) {
 Some(status) => {
@@ -273,7 +274,7 @@ impl ConnectionManager {
 .collect();

 if attempt_peers.is_empty() {
- debug!("No peers available for replication")
+ trace!("No peers available for replication")
 }

 for peer in attempt_peers {

From bca3e168d7f9acd416b766e6244ef8b248d5c8aa Mon Sep 17 00:00:00 2001
From: adz
Date: Sun, 18 Jun 2023 13:49:20 +0200
Subject: [PATCH 116/126] A little bit less verbose logging

---
 aquadoggo/src/replication/ingest.rs | 4 ++--
 aquadoggo/src/replication/manager.rs | 16 ++++------------
 2 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/aquadoggo/src/replication/ingest.rs b/aquadoggo/src/replication/ingest.rs
index 75951db0f..344286662 100644
--- a/aquadoggo/src/replication/ingest.rs
+++ b/aquadoggo/src/replication/ingest.rs
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later

-use log::debug;
+use log::trace;
 use p2panda_rs::api::validation::{
 ensure_document_not_deleted, get_checked_document_id_for_view_id, get_expected_skiplink,
 is_next_seq_num, validate_claimed_schema_id,
@@ -132,7 +132,7 @@ impl SyncIngest {
 ) -> Result<(), IngestError> {
 let entry = decode_entry(encoded_entry)?;

- debug!(
+ trace!(
 "Received entry {:?} for log {:?} and {}",
 entry.seq_num(),
 entry.log_id(),

diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index e4d073d75..dbf5468ed 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -69,7 +69,6 @@ where
 /// Warning: This might also remove actively running sessions. Only clear sessions when you
 /// are sure they are a) done or b) the peer closed its connection.
pub fn remove_sessions(&mut self, remote_peer: &P) { - debug!("Remove all sessions with peer: {}", remote_peer.display()); self.sessions.remove(remote_peer); } @@ -130,11 +129,6 @@ where .enumerate() .find(|(_, session)| session.id == *session_id) { - debug!( - "Remove session {} with peer: {}", - session_id, - remote_peer.display() - ); sessions.remove(index); } else { warn!( @@ -309,8 +303,7 @@ where if &self.local_peer < remote_peer { // Drop our pending session debug!( - "Drop pending outbound session and process inbound session request with duplicate target set {:?}", - existing_session.target_set() + "Drop pending outbound session and process inbound session request with duplicate target set" ); self.remove_session(remote_peer, &existing_session.id); @@ -319,8 +312,7 @@ where } else { // Keep our pending session, ignore inbound request debug!( - "Ignore inbound request and keep pending outbound session with duplicate target set {:?}", - existing_session.target_set() + "Ignore inbound request and keep pending outbound session with duplicate target set", ); Ok(false) } @@ -370,7 +362,7 @@ where .iter() .find(|existing_session| existing_session.id == *session_id) { - debug!("Handle sync request containing duplicate session id"); + trace!("Handle sync request containing duplicate session id"); return self .handle_duplicate_session(remote_peer, target_set, existing_session) .await; @@ -381,7 +373,7 @@ where .iter() .find(|session| session.target_set() == *target_set) { - debug!("Handle sync request containing duplicate target sets"); + trace!("Handle sync request containing duplicate target sets"); return self .handle_duplicate_target_set(remote_peer, session_id, mode, session) .await; From c8e6f04691a9f1e5c5d49a317f28c1a0f0147b5d Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 18 Jun 2023 14:25:26 +0200 Subject: [PATCH 117/126] Fix tests --- aquadoggo/src/network/peers/behaviour.rs | 157 +++++++++++---------- aquadoggo/src/replication/manager.rs | 168 ++++++++++++++--------- 2 files changed, 187 insertions(+), 138 deletions(-) diff --git a/aquadoggo/src/network/peers/behaviour.rs b/aquadoggo/src/network/peers/behaviour.rs index 790e37ade..8e9be6139 100644 --- a/aquadoggo/src/network/peers/behaviour.rs +++ b/aquadoggo/src/network/peers/behaviour.rs @@ -176,11 +176,12 @@ impl NetworkBehaviour for Behaviour { #[cfg(test)] mod tests { use futures::FutureExt; - use libp2p::swarm::{keep_alive, Swarm}; + use libp2p::swarm::{keep_alive, ConnectionId, Swarm}; use libp2p_swarm_test::SwarmExt; use p2panda_rs::schema::SchemaId; use rstest::rstest; + use crate::network::Peer; use crate::replication::{Message, SyncMessage, TargetSet}; use crate::test_utils::helpers::random_target_set; @@ -189,23 +190,23 @@ mod tests { #[tokio::test] async fn peers_connect() { // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); - let mut swarm2 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); + let mut swarm_1 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); + let mut swarm_2 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional + // Listen on swarm_1 and connect from swarm_2, this should establish a bi-directional // connection. 
- swarm1.listen().await;
- swarm2.connect(&mut swarm1).await;
+ swarm_1.listen().await;
+ swarm_2.connect(&mut swarm_1).await;

- let swarm1_peer_id = *swarm1.local_peer_id();
- let swarm2_peer_id = *swarm2.local_peer_id();
+ let swarm_1_peer_id = *swarm_1.local_peer_id();
+ let swarm_2_peer_id = *swarm_2.local_peer_id();

- let info1 = swarm1.network_info();
- let info2 = swarm2.network_info();
+ let info1 = swarm_1.network_info();
+ let info2 = swarm_2.network_info();

 // Peers should be connected.
- assert!(swarm2.is_connected(&swarm1_peer_id));
- assert!(swarm1.is_connected(&swarm2_peer_id));
+ assert!(swarm_2.is_connected(&swarm_1_peer_id));
+ assert!(swarm_1.is_connected(&swarm_2_peer_id));

 // Each swarm should have exactly one connected peer.
 assert_eq!(info1.num_peers(), 1);
@@ -219,25 +220,25 @@ mod tests {
 #[tokio::test]
 async fn incompatible_network_behaviour() {
 // Create two swarms
- let mut swarm1 = Swarm::new_ephemeral(|_| PeersBehaviour::new());
- let mut swarm2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour);
+ let mut swarm_1 = Swarm::new_ephemeral(|_| PeersBehaviour::new());
+ let mut swarm_2 = Swarm::new_ephemeral(|_| keep_alive::Behaviour);

- // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection.
- swarm1.listen().await;
- swarm2.connect(&mut swarm1).await;
+ // Listen on swarm_1 and connect from swarm_2, this should establish a bi-directional connection.
+ swarm_1.listen().await;
+ swarm_2.connect(&mut swarm_1).await;

- let swarm1_peer_id = *swarm1.local_peer_id();
- let swarm2_peer_id = *swarm2.local_peer_id();
+ let swarm_1_peer_id = *swarm_1.local_peer_id();
+ let swarm_2_peer_id = *swarm_2.local_peer_id();

- let info1 = swarm1.network_info();
- let info2 = swarm2.network_info();
+ let info1 = swarm_1.network_info();
+ let info2 = swarm_2.network_info();

 // Even though the network behaviours of our two peers are incompatible, they still
 // establish a connection.

 // Peers should be connected.
- assert!(swarm2.is_connected(&swarm1_peer_id));
- assert!(swarm1.is_connected(&swarm2_peer_id));
+ assert!(swarm_2.is_connected(&swarm_1_peer_id));
+ assert!(swarm_1.is_connected(&swarm_2_peer_id));

 // Each swarm should have exactly one connected peer.
 assert_eq!(info1.num_peers(), 1);
@@ -247,16 +248,16 @@ mod tests {
 assert_eq!(info1.connection_counters().num_established(), 1);
 assert_eq!(info2.connection_counters().num_established(), 1);

 // Send a message from the swarm_1 local peer to the swarm_2 local peer.
- swarm1.behaviour_mut().send_message(
- swarm2_peer_id,
+ swarm_1.behaviour_mut().send_message(
+ Peer::new(swarm_2_peer_id, ConnectionId::new_unchecked(1)),
 SyncMessage::new(0, Message::SyncRequest(0.into(), TargetSet::new(&vec![]))),
 );

- // Await a swarm event on swarm2.
+ // Await a swarm event on swarm_2.
 //
 // We expect a timeout panic as no event will occur.
- let result = std::panic::AssertUnwindSafe(swarm2.next_swarm_event()) + let result = std::panic::AssertUnwindSafe(swarm_2.next_swarm_event()) .catch_unwind() .await; @@ -264,79 +265,87 @@ mod tests { } #[rstest] - #[case(TargetSet::new(&vec![SchemaId::SchemaFieldDefinition(0)]), TargetSet::new(&vec![SchemaId::SchemaDefinition(0)]))] + #[case( + TargetSet::new(&vec![SchemaId::SchemaFieldDefinition(0)]), + TargetSet::new(&vec![SchemaId::SchemaDefinition(0)]), + )] #[case(random_target_set(), random_target_set())] #[tokio::test] async fn swarm_behaviour_events( #[case] target_set_1: TargetSet, #[case] target_set_2: TargetSet, ) { - // Create two swarms - let mut swarm1 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); - let mut swarm2 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); - - // Listen on swarm1 and connect from swarm2, this should establish a bi-directional connection. - swarm1.listen().await; - swarm2.connect(&mut swarm1).await; + let mut swarm_1 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); + let mut swarm_2 = Swarm::new_ephemeral(|_| PeersBehaviour::new()); - let mut res1 = Vec::new(); - let mut res2 = Vec::new(); + // Listen on swarm_1 and connect from swarm_2, this should establish a bi-directional + // connection + swarm_1.listen().await; + swarm_2.connect(&mut swarm_1).await; - let swarm1_peer_id = *swarm1.local_peer_id(); - let swarm2_peer_id = *swarm2.local_peer_id(); + let mut events_1 = Vec::new(); + let mut events_2 = Vec::new(); - // Send a message from swarm1 to peer2. - swarm1.behaviour_mut().send_message( - swarm2_peer_id, - SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1.clone())), - ); - - // Send a message from swarm2 peer1. - swarm2.behaviour_mut().send_message( - swarm1_peer_id, - SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone())), - ); + let swarm_1_peer_id = *swarm_1.local_peer_id(); + let swarm_2_peer_id = *swarm_2.local_peer_id(); // Collect the next 2 behaviour events which occur in either swarms. for _ in 0..2 { tokio::select! { - Event::PeerConnected(peer_id) = swarm1.next_behaviour_event() => res1.push((peer_id, None)), - Event::PeerConnected(peer_id) = swarm2.next_behaviour_event() => res2.push((peer_id, None)), + Event::PeerConnected(peer) = swarm_1.next_behaviour_event() => { + events_1.push((peer, None)); + }, + Event::PeerConnected(peer) = swarm_2.next_behaviour_event() => events_2.push((peer, None)), } } - // And again add the next 2 behaviour events which occur in either swarms. + assert_eq!(events_1.len(), 1); + assert_eq!(events_2.len(), 1); + + // The first event should have been a ConnectionEstablished containing the expected peer + // id + let (peer_2, message) = events_1[0].clone(); + assert_eq!(peer_2.id(), swarm_2_peer_id); + assert!(message.is_none()); + + let (peer_1, message) = events_2[0].clone(); + assert_eq!(peer_1.id(), swarm_1_peer_id); + assert!(message.is_none()); + + // Send a message from swarm_1 to swarm_2 + swarm_1.behaviour_mut().send_message( + peer_2, + SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1.clone())), + ); + + // Send a message from swarm_2 to swarm_1 + swarm_2.behaviour_mut().send_message( + peer_1, + SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone())), + ); + + // And again add the next behaviour events which occur in either swarms for _ in 0..2 { tokio::select! 
{ - Event::MessageReceived(peer_id, message) = swarm1.next_behaviour_event() => res1.push((peer_id, Some(message))), - Event::MessageReceived(peer_id, message) = swarm2.next_behaviour_event() => res2.push((peer_id, Some(message))), + Event::MessageReceived(peer, message) = swarm_1.next_behaviour_event() => events_1.push((peer, Some(message))), + Event::MessageReceived(peer, message) = swarm_2.next_behaviour_event() => events_2.push((peer, Some(message))), } } - // Each swarm should have emitted exactly one event. - assert_eq!(res1.len(), 2); - assert_eq!(res2.len(), 2); - - // The first event should have been a ConnectionEstablished containing the expected peer id. - let (peer_id, message) = res1[0].clone(); - assert_eq!(peer_id, swarm2_peer_id); - assert!(message.is_none()); - - let (peer_id, message) = res2[0].clone(); - assert_eq!(peer_id, swarm1_peer_id); - assert!(message.is_none()); + assert_eq!(events_1.len(), 2); + assert_eq!(events_2.len(), 2); - // swarm1 should have received the message from swarm2 peer. - let (peer_id, message) = res1[1].clone(); - assert_eq!(peer_id, swarm2_peer_id); + // swarm_1 should have received the message from swarm_2 peer + let (peer, message) = events_1[1].clone(); + assert_eq!(peer.id(), swarm_2_peer_id); assert_eq!( message.unwrap(), SyncMessage::new(1, Message::SyncRequest(0.into(), target_set_2.clone())) ); - // swarm2 should have received the message from swarm1 peer. - let (peer_id, message) = res2[1].clone(); - assert_eq!(peer_id, swarm1_peer_id); + // swarm_2 should have received the message from swarm_1 peer + let (peer, message) = events_2[1].clone(); + assert_eq!(peer.id(), swarm_1_peer_id); assert_eq!( message.unwrap(), SyncMessage::new(0, Message::SyncRequest(0.into(), target_set_1)) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index dbf5468ed..023dc5bc3 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -507,6 +507,7 @@ where #[cfg(test)] mod tests { use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; + use p2panda_rs::Human; use rstest::rstest; use tokio::sync::broadcast; @@ -522,33 +523,48 @@ mod tests { use super::{SyncManager, INITIAL_SESSION_ID}; - const PEER_ID_LOCAL: &str = "local"; - const PEER_ID_REMOTE: &str = "remote"; + #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] + struct Peer(String); + + impl Peer { + pub fn new(id: &str) -> Self { + Self(id.to_string()) + } + } + + impl Human for Peer { + fn display(&self) -> String { + self.0.clone() + } + } #[rstest] fn initiate_outbound_session( #[from(random_target_set)] target_set_1: TargetSet, #[from(random_target_set)] target_set_2: TargetSet, ) { + let peer_id_local: Peer = Peer::new("local"); + let peer_id_remote: Peer = Peer::new("remote"); + test_runner(move |node: TestNode| async move { let mode = Mode::Naive; let (tx, _rx) = broadcast::channel(8); let ingest = SyncIngest::new(SchemaProvider::default(), tx); - let mut manager = SyncManager::new(node.context.store.clone(), ingest, PEER_ID_LOCAL); + let mut manager = SyncManager::new(node.context.store.clone(), ingest, peer_id_local); let result = manager - .initiate_session(&PEER_ID_REMOTE, &target_set_1, &mode) + .initiate_session(&peer_id_remote, &target_set_1, &mode) .await; assert!(result.is_ok()); let result = manager - .initiate_session(&PEER_ID_REMOTE, &target_set_2, &mode) + .initiate_session(&peer_id_remote, &target_set_2, &mode) .await; assert!(result.is_ok()); // Expect error when initiating a 
session for the same target set let result = manager - .initiate_session(&PEER_ID_REMOTE, &target_set_1, &mode) + .initiate_session(&peer_id_remote, &target_set_1, &mode) .await; assert!(matches!( result, @@ -563,26 +579,29 @@ mod tests { #[from(random_target_set)] target_set_2: TargetSet, #[from(random_target_set)] target_set_3: TargetSet, ) { + let peer_id_local: Peer = Peer::new("local"); + let peer_id_remote: Peer = Peer::new("remote"); + test_runner(move |node: TestNode| async move { let (tx, _rx) = broadcast::channel(8); let ingest = SyncIngest::new(SchemaProvider::default(), tx); - let mut manager = SyncManager::new(node.context.store.clone(), ingest, PEER_ID_LOCAL); + let mut manager = SyncManager::new(node.context.store.clone(), ingest, peer_id_local); let message = SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set_1.clone())); - let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; + let result = manager.handle_message(&peer_id_remote, &message).await; assert!(result.is_ok()); let message = SyncMessage::new(1, Message::SyncRequest(Mode::Naive, target_set_2.clone())); - let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; + let result = manager.handle_message(&peer_id_remote, &message).await; assert!(result.is_ok()); // Reject attempt to create session again let message = SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set_3.clone())); - let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; + let result = manager.handle_message(&peer_id_remote, &message).await; assert!(matches!(result, Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::InboundPendingSession(0) )); @@ -590,7 +609,7 @@ mod tests { // Reject different session concerning same target set let message = SyncMessage::new(2, Message::SyncRequest(Mode::Naive, target_set_2.clone())); - let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; + let result = manager.handle_message(&peer_id_remote, &message).await; assert!(matches!( result, Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::InboundExistingTargetSet(target_set_2) @@ -638,6 +657,9 @@ mod tests { #[from(random_target_set)] target_set_1: TargetSet, #[from(random_target_set)] target_set_2: TargetSet, ) { + let peer_id_local: Peer = Peer::new("local"); + let peer_id_remote: Peer = Peer::new("remote"); + test_runner(move |node: TestNode| async move { let mode = Mode::Naive; let (tx, _rx) = broadcast::channel(8); @@ -647,16 +669,19 @@ mod tests { // // This is important for testing the deterministic handling of concurrent session // requests which contain the same session id. - assert!(PEER_ID_LOCAL < PEER_ID_REMOTE); + assert!(peer_id_local < peer_id_remote); // Sanity check: Target sets need to be different assert!(target_set_1 != target_set_2); // Local peer A initiates a session with id 0 and target set 1. - let mut manager_a = - SyncManager::new(node.context.store.clone(), ingest.clone(), PEER_ID_LOCAL); + let mut manager_a = SyncManager::new( + node.context.store.clone(), + ingest.clone(), + peer_id_local.clone(), + ); let result = manager_a - .initiate_session(&PEER_ID_REMOTE, &target_set_1, &mode) + .initiate_session(&peer_id_remote, &target_set_1, &mode) .await .unwrap(); @@ -667,9 +692,9 @@ mod tests { // // Note that both peers use the _same_ session id but _different_ target sets. 
let mut manager_b = - SyncManager::new(node.context.store.clone(), ingest, PEER_ID_REMOTE); + SyncManager::new(node.context.store.clone(), ingest, peer_id_remote.clone()); let result = manager_b - .initiate_session(&PEER_ID_LOCAL, &target_set_2, &mode) + .initiate_session(&peer_id_local, &target_set_2, &mode) .await .unwrap(); @@ -678,7 +703,7 @@ mod tests { // Both peers send and handle the requests concurrently. let result = manager_a - .handle_message(&PEER_ID_REMOTE, &sync_request_b) + .handle_message(&peer_id_remote, &sync_request_b) .await .unwrap(); @@ -695,7 +720,7 @@ mod tests { ); let result = manager_b - .handle_message(&PEER_ID_LOCAL, &sync_request_a) + .handle_message(&peer_id_local, &sync_request_a) .await .unwrap(); @@ -705,17 +730,17 @@ mod tests { // Peer A has two sessions running: The one initiated by Peer B and the one it // re-initiated itself with the new session id - let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + let manager_a_sessions = manager_a.get_sessions(&peer_id_remote); assert_eq!(manager_a_sessions.len(), 2); // Peer B has still one running, it didn't learn about the re-initiated session of A // yet - let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + let manager_b_sessions = manager_b.get_sessions(&peer_id_local); assert_eq!(manager_b_sessions.len(), 1); // Peer B processes the `Have`, `SyncDone` and `SyncRequest` messages from Peer A. let result = manager_b - .handle_message(&PEER_ID_LOCAL, &have_message_a) + .handle_message(&peer_id_local, &have_message_a) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 2); @@ -726,19 +751,19 @@ mod tests { // Sync done, they send no more messages. let result = manager_b - .handle_message(&PEER_ID_LOCAL, &done_message_a) + .handle_message(&peer_id_local, &done_message_a) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); // Peer B should have closed the session for good - let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + let manager_b_sessions = manager_b.get_sessions(&peer_id_local); assert_eq!(manager_b_sessions.len(), 0); // Now the second, re-established sync request from peer A concerning another target // set arrives at peer B let result = manager_b - .handle_message(&PEER_ID_LOCAL, &sync_request_a_corrected) + .handle_message(&peer_id_local, &sync_request_a_corrected) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 2); @@ -748,32 +773,32 @@ mod tests { (response.messages[0].clone(), response.messages[1].clone()); // Peer B should now know about one session again - let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + let manager_b_sessions = manager_b.get_sessions(&peer_id_local); assert_eq!(manager_b_sessions.len(), 1); // Peer A processes both the `Have` and `SyncDone` messages from Peer B for the first // session and produces no new messages. We're done with this session on Peer A as // well now. 
let result = manager_a - .handle_message(&PEER_ID_REMOTE, &have_message_b) + .handle_message(&peer_id_remote, &have_message_b) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); let result = manager_a - .handle_message(&PEER_ID_REMOTE, &done_message_b) + .handle_message(&peer_id_remote, &done_message_b) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); // Peer A should now know about one session again - let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + let manager_a_sessions = manager_a.get_sessions(&peer_id_remote); assert_eq!(manager_a_sessions.len(), 1); // Peer A processes both the re-initiated sessions `Have` and `SyncDone` messages from // Peer B and produces its own answer. let result = manager_a - .handle_message(&PEER_ID_REMOTE, &have_message_b_corrected) + .handle_message(&peer_id_remote, &have_message_b_corrected) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 2); @@ -782,7 +807,7 @@ mod tests { (response.messages[0].clone(), response.messages[1].clone()); let result = manager_a - .handle_message(&PEER_ID_REMOTE, &done_message_b_corrected) + .handle_message(&peer_id_remote, &done_message_b_corrected) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); @@ -790,22 +815,22 @@ mod tests { // Peer B processes both the re-initiated `Have` and `SyncDone` messages from Peer A // and produces no new messages. let result = manager_b - .handle_message(&PEER_ID_LOCAL, &have_message_a_corrected) + .handle_message(&peer_id_local, &have_message_a_corrected) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); let result = manager_b - .handle_message(&PEER_ID_LOCAL, &done_message_a_corrected) + .handle_message(&peer_id_local, &done_message_a_corrected) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); // After processing all messages both peers should have no sessions remaining. - let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + let manager_a_sessions = manager_a.get_sessions(&peer_id_remote); assert_eq!(manager_a_sessions.len(), 0); - let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + let manager_b_sessions = manager_b.get_sessions(&peer_id_local); assert_eq!(manager_b_sessions.len(), 0); }) } @@ -833,6 +858,9 @@ mod tests { fn concurrent_requests_duplicate_target_set( #[from(random_target_set)] target_set_1: TargetSet, ) { + let peer_id_local: Peer = Peer::new("local"); + let peer_id_remote: Peer = Peer::new("remote"); + test_runner(move |node: TestNode| async move { let mode = Mode::Naive; let (tx, _rx) = broadcast::channel(8); @@ -840,17 +868,20 @@ mod tests { // Local peer id is < than remote, this is important for testing the deterministic // handling of concurrent session requests which contain the same session id. - assert!(PEER_ID_LOCAL < PEER_ID_REMOTE); + assert!(peer_id_local < peer_id_remote); - let mut manager_a = - SyncManager::new(node.context.store.clone(), ingest.clone(), PEER_ID_LOCAL); + let mut manager_a = SyncManager::new( + node.context.store.clone(), + ingest.clone(), + peer_id_local.clone(), + ); let mut manager_b = - SyncManager::new(node.context.store.clone(), ingest, PEER_ID_REMOTE); + SyncManager::new(node.context.store.clone(), ingest, peer_id_remote.clone()); // Local peer A initiates a session with target set A. 
let result = manager_a - .initiate_session(&PEER_ID_REMOTE, &target_set_1, &mode) + .initiate_session(&peer_id_remote, &target_set_1, &mode) .await; let sync_messages = result.unwrap(); @@ -859,14 +890,14 @@ mod tests { // Remote peer B initiates a session with a dummy peer just to increment the session // id. - let dummy_peer_id = "some_other_peer"; + let dummy_peer_id = Peer::new("some_other_peer"); let _result = manager_b .initiate_session(&dummy_peer_id, &target_set_1, &mode) .await; // Remote peer B initiates a session with target set A. let result = manager_b - .initiate_session(&PEER_ID_LOCAL, &target_set_1, &mode) + .initiate_session(&peer_id_local, &target_set_1, &mode) .await; let sync_messages = result.unwrap(); @@ -878,7 +909,7 @@ mod tests { // Both peers send and handle the requests concurrently. let result = manager_a - .handle_message(&PEER_ID_REMOTE, &sync_request_b) + .handle_message(&peer_id_remote, &sync_request_b) .await; let response = result.unwrap(); @@ -889,7 +920,7 @@ mod tests { (response.messages[0].clone(), response.messages[1].clone()); let result = manager_b - .handle_message(&PEER_ID_LOCAL, &sync_request_a) + .handle_message(&peer_id_local, &sync_request_a) .await; let response = result.unwrap(); @@ -898,15 +929,15 @@ mod tests { assert_eq!(response.messages.len(), 0); // Both peers have exactly one session running. - let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + let manager_a_sessions = manager_a.get_sessions(&peer_id_remote); assert_eq!(manager_a_sessions.len(), 1); - let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + let manager_b_sessions = manager_b.get_sessions(&peer_id_local); assert_eq!(manager_b_sessions.len(), 1); // Peer B processes the `Have` and `SyncDone` messages from Peer A. let result = manager_b - .handle_message(&PEER_ID_LOCAL, &have_message_a) + .handle_message(&peer_id_local, &have_message_a) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 2); @@ -917,7 +948,7 @@ mod tests { // Sync done, they send no more messages. let result = manager_b - .handle_message(&PEER_ID_LOCAL, &done_message_a) + .handle_message(&peer_id_local, &done_message_a) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); @@ -925,49 +956,55 @@ mod tests { // Peer A processes both the `Have` and `SyncDone` messages from Peer B and produces // no new messages. let result = manager_a - .handle_message(&PEER_ID_REMOTE, &have_message_b) + .handle_message(&peer_id_remote, &have_message_b) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); let result = manager_a - .handle_message(&PEER_ID_REMOTE, &done_message_b) + .handle_message(&peer_id_remote, &done_message_b) .await; let response = result.unwrap(); assert_eq!(response.messages.len(), 0); // After processing all messages both peers should have no sessions remaining. 
- let manager_a_sessions = manager_a.get_sessions(&PEER_ID_REMOTE); + let manager_a_sessions = manager_a.get_sessions(&peer_id_remote); assert_eq!(manager_a_sessions.len(), 0); - let manager_b_sessions = manager_b.get_sessions(&PEER_ID_LOCAL); + let manager_b_sessions = manager_b.get_sessions(&peer_id_local); assert_eq!(manager_b_sessions.len(), 0); }) } #[rstest] fn inbound_checks_supported_mode(#[from(random_target_set)] target_set: TargetSet) { + let peer_id_local: Peer = Peer::new("local"); + let peer_id_remote: Peer = Peer::new("remote"); + test_runner(move |node: TestNode| async move { let (tx, _rx) = broadcast::channel(8); let ingest = SyncIngest::new(SchemaProvider::default(), tx); // Should not fail when requesting supported replication mode - let mut manager = - SyncManager::new(node.context.store.clone(), ingest.clone(), PEER_ID_LOCAL); + let mut manager = SyncManager::new( + node.context.store.clone(), + ingest.clone(), + peer_id_local.clone(), + ); let message = SyncMessage::new( INITIAL_SESSION_ID, Message::SyncRequest(Mode::Naive, target_set.clone()), ); - let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; + let result = manager.handle_message(&peer_id_remote, &message).await; assert!(result.is_ok()); // Should fail when requesting unsupported replication mode - let mut manager = SyncManager::new(node.context.store.clone(), ingest, PEER_ID_LOCAL); + let mut manager = SyncManager::new(node.context.store.clone(), ingest, peer_id_local); let message = SyncMessage::new( INITIAL_SESSION_ID, Message::SyncRequest(Mode::SetReconciliation, target_set.clone()), ); - let result = manager.handle_message(&PEER_ID_REMOTE, &message).await; + let result = manager.handle_message(&peer_id_remote, &message).await; assert!(result.is_err()); }) } @@ -1004,6 +1041,9 @@ mod tests { config_a: PopulateStoreConfig, #[from(populate_store_config)] config_b: PopulateStoreConfig, ) { + let peer_id_local: Peer = Peer::new("local"); + let peer_id_remote: Peer = Peer::new("remote"); + test_runner_with_manager(|manager: TestNodeManager| async move { let mut node_a = manager.create().await; let mut node_b = manager.create().await; @@ -1017,18 +1057,18 @@ mod tests { let mut manager_a = SyncManager::new( node_a.context.store.clone(), SyncIngest::new(node_a.context.schema_provider.clone(), tx.clone()), - PEER_ID_LOCAL, + peer_id_local.clone(), ); let mut manager_b = SyncManager::new( node_b.context.store.clone(), SyncIngest::new(node_b.context.schema_provider.clone(), tx), - PEER_ID_REMOTE, + peer_id_remote.clone(), ); // Send `SyncRequest` to remote let messages = manager_a - .initiate_session(&PEER_ID_REMOTE, &target_set, &Mode::Naive) + .initiate_session(&peer_id_remote, &target_set, &Mode::Naive) .await .unwrap(); @@ -1043,7 +1083,7 @@ mod tests { // Remote receives `SyncRequest` // Send `Have` and `SyncDone` messages back to local let result = manager_b - .handle_message(&PEER_ID_LOCAL, &messages[0]) + .handle_message(&peer_id_local, &messages[0]) .await .unwrap(); @@ -1059,13 +1099,13 @@ mod tests { // Receive `Have` and `SyncDone` messages from remote // Send `Have`, `Entry` and `SyncDone` messages to remote let result_have = manager_a - .handle_message(&PEER_ID_REMOTE, &result.messages[0]) + .handle_message(&peer_id_remote, &result.messages[0]) .await .unwrap(); assert!(!result_have.is_done); let result_done = manager_a - .handle_message(&PEER_ID_REMOTE, &result.messages[1]) + .handle_message(&peer_id_remote, &result.messages[1]) .await .unwrap(); assert!(result_done.is_done); @@ 
-1083,7 +1123,7 @@ mod tests { // Remote receives `Have`, `Entry` `SyncDone` messages from local for (index, message) in result_have.messages.iter().enumerate() { let result = manager_b - .handle_message(&PEER_ID_LOCAL, message) + .handle_message(&peer_id_local, message) .await .unwrap(); From b4a77ccd41efe0adf325de0433d9e4dbb657d1de Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 18 Jun 2023 15:25:55 +0200 Subject: [PATCH 118/126] Add a test for connection manager --- aquadoggo/src/replication/service.rs | 72 ++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 87c3f90d4..8df620328 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -321,3 +321,75 @@ impl ConnectionManager { } } } + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use libp2p::swarm::ConnectionId; + use libp2p::PeerId; + use tokio::sync::broadcast; + + use crate::bus::ServiceMessage; + use crate::network::Peer; + use crate::replication::{Message, Mode, SyncMessage}; + use crate::test_utils::{test_runner, TestNode}; + + use super::ConnectionManager; + + #[test] + fn peer_lifetime() { + let local_peer_id = + PeerId::from_str("12D3KooWD3JAiSNrVGxjC7vJCcjwS8egbtJV9kzrstxLRKiwb9UY").unwrap(); + let remote_peer_id = + PeerId::from_str("12D3KooWCqtLMJQLY3sm9rpDampJ2nPLswPPZto3mrRY7794QATF").unwrap(); + + test_runner(move |node: TestNode| async move { + let (tx, mut rx) = broadcast::channel::(10); + + let mut manager = ConnectionManager::new( + &node.context.schema_provider, + &node.context.store, + &tx, + local_peer_id, + ); + + let target_set = manager.target_set().await; + + // Inform connection manager about new peer + let remote_peer = Peer::new(remote_peer_id, ConnectionId::new_unchecked(1)); + + manager + .handle_service_message(ServiceMessage::PeerConnected(remote_peer)) + .await; + + let status = manager + .peers + .get(&remote_peer) + .expect("Peer to be registered in connection manager"); + assert_eq!(manager.peers.len(), 1); + assert_eq!(status.peer, remote_peer); + + // Manager attempts a replication session with that peer + assert_eq!(rx.len(), 1); + assert_eq!( + rx.recv().await, + Ok(ServiceMessage::SentReplicationMessage( + remote_peer, + SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set)) + )) + ); + assert_eq!(manager.sync_manager.get_sessions(&remote_peer).len(), 1); + + // Inform manager about peer disconnected + manager + .handle_service_message(ServiceMessage::PeerDisconnected(remote_peer)) + .await; + + // Manager cleans up internal state + assert_eq!(rx.len(), 0); + assert_eq!(manager.peers.len(), 0); + assert_eq!(manager.sync_manager.get_sessions(&remote_peer).len(), 0); + }); + } +} From 908e4fead8acfb2658d1f0b75badca32f7a9b29d Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 18 Jun 2023 16:09:10 +0200 Subject: [PATCH 119/126] Write some more doc-strings --- aquadoggo/src/network/behaviour.rs | 11 ++++ aquadoggo/src/network/peers/behaviour.rs | 2 + aquadoggo/src/network/peers/handler.rs | 53 +++++++++++++++ aquadoggo/src/network/peers/peer.rs | 17 +++++ aquadoggo/src/network/service.rs | 3 +- aquadoggo/src/replication/service.rs | 82 +++++++++++++++--------- 6 files changed, 138 insertions(+), 30 deletions(-) diff --git a/aquadoggo/src/network/behaviour.rs b/aquadoggo/src/network/behaviour.rs index 5e061d186..90380420f 100644 --- a/aquadoggo/src/network/behaviour.rs +++ b/aquadoggo/src/network/behaviour.rs @@ -24,6 +24,17 @@ const 
PING_INTERVAL: Duration = Duration::from_secs(5);
 const PING_TIMEOUT: Duration = Duration::from_secs(3);
 
 /// Network behaviour for the aquadoggo node.
+///
+/// In libp2p all different behaviours are "merged" into one "main behaviour" with the help of
+/// the `NetworkBehaviour` derive macro.
+///
+/// All behaviours share the same connections with each other. Together they form something we
+/// could call our "custom" networking behaviour.
+///
+/// It is possible for a peer to not support all behaviours; internally libp2p negotiates the
+/// capabilities of each peer for us and upgrades the protocol accordingly. For example, two peers
+/// can handle p2panda messages with each other (using the `peers` behaviour) but do not
+/// necessarily need to be able to support the `relay` behaviour.
 #[derive(NetworkBehaviour)]
 pub struct Behaviour {
     /// Determine NAT status by requesting remote peers to dial the public address of the
diff --git a/aquadoggo/src/network/peers/behaviour.rs b/aquadoggo/src/network/peers/behaviour.rs
index 8e9be6139..97cc89f1a 100644
--- a/aquadoggo/src/network/peers/behaviour.rs
+++ b/aquadoggo/src/network/peers/behaviour.rs
@@ -29,6 +29,8 @@ pub enum Event {
     PeerDisconnected(Peer),
 }
 
+/// p2panda network behaviour managing peers who can speak the p2panda protocol, handling incoming
+/// and outgoing messages.
 #[derive(Debug)]
 pub struct Behaviour {
     events: VecDeque<ToSwarm<Event, HandlerInEvent>>,
diff --git a/aquadoggo/src/network/peers/handler.rs b/aquadoggo/src/network/peers/handler.rs
index 82128904b..1878d10a9 100644
--- a/aquadoggo/src/network/peers/handler.rs
+++ b/aquadoggo/src/network/peers/handler.rs
@@ -21,6 +21,59 @@ use crate::replication::SyncMessage;
 /// send/receiving a message from. Connections that idle beyond this timeout are disconnected.
 const IDLE_TIMEOUT: Duration = Duration::from_secs(60);
 
+/// Handler for an incoming or outgoing connection to a remote peer dealing with the p2panda
+/// protocol.
+///
+/// Manages the bi-directional data streams and encodes and decodes p2panda messages on them using
+/// the CBOR format.
+///
+/// Connection handlers can be closed due to critical errors, for example when a replication error
+/// occurs. They can also close after a certain duration of no networking activity (timeout).
+/// Note that this does _not_ close the connection to the peer in general, only the p2panda
+/// messaging protocol.
+///
+/// Peers can have multiple connections to peers, even if it is the same one. This especially is
+/// the case when both peers dial each other at the same time. Then we will have two connections
+/// (and two handlers) for each an incoming (remote peer dialed us) and outgoing (we dialed remote
+/// peer) connection. Please note that this is a special (but not unusual) case. The regular case
+/// is that we will only maintain one connection (either incoming or outgoing) to a peer.
+///
+/// Each connection is managed by one connection handler each. Inside of each connection we
+/// maintain a bi-directional (inbound & outbound) data stream.
+/// +/// The following diagram is an example of two connections to one remote peer: +/// +/// Connection +/// (Incoming) +/// ┌───────────────────┐ +/// │ │ +/// │ ┌─────────────┐ │ ┌─────────────┐ +/// │ │ Stream ◄──┼──────────┤ │ +/// │ │ (Inbound) │ │ │ │ +/// │ └─────────────┘ │ │ │ +/// │ │ │ │ +/// │ ┌─────────────┐ │ │ │ +/// │ │ Stream ├──┼──────────► │ +/// │ │ (Outbound) │ │ │ │ +/// │ └─────────────┘ │ │ │ +/// │ │ │ │ +/// └───────────────────┘ │ │ +/// │ │ +/// Connection │ Remote Peer │ +/// (Outgoing) │ │ +/// ┌───────────────────┐ │ │ +/// │ │ │ │ +/// │ ┌─────────────┐ │ │ │ +/// │ │ Stream ◄──┼──────────┤ │ +/// │ │ (Inbound) │ │ │ │ +/// │ └─────────────┘ │ │ │ +/// │ │ │ │ +/// │ ┌─────────────┐ │ │ │ +/// │ │ Stream ├──┼──────────► │ +/// │ │ (Outbound) │ │ │ │ +/// │ └─────────────┘ │ └─────────────┘ +/// │ │ +/// └───────────────────┘ pub struct Handler { /// Upgrade configuration for the protocol. listen_protocol: SubstreamProtocol, diff --git a/aquadoggo/src/network/peers/peer.rs b/aquadoggo/src/network/peers/peer.rs index 9fc55f8e0..be8e0935f 100644 --- a/aquadoggo/src/network/peers/peer.rs +++ b/aquadoggo/src/network/peers/peer.rs @@ -6,22 +6,36 @@ use libp2p::swarm::ConnectionId; use libp2p::PeerId; use p2panda_rs::Human; +/// Identifier of a p2panda peer. +/// +/// Additional to the unique `PeerId` we also store the `ConnectionId` to understand which libp2p +/// connection handler deals with the communication with that peer. In case connections get stale +/// or fail we can use this information to understand which peer got affected. #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] pub struct Peer(PeerId, ConnectionId); impl Peer { + /// Returns a new instance of a peer. pub fn new(peer_id: PeerId, connection_id: ConnectionId) -> Self { Self(peer_id, connection_id) } + /// Returns a new instance of our local peer. + /// + /// Local peers can not have a connection "to themselves", still we want to be able to compare + /// our local peer with a remote one. This method therefore sets a "fake" `ConnectionId`. pub fn new_local_peer(local_peer_id: PeerId) -> Self { Self(local_peer_id, ConnectionId::new_unchecked(0)) } + /// Returns the `PeerId` of this peer. + /// + /// The `PeerId` is used to determine which peer "wins" over a duplicate session conflict. pub fn id(&self) -> PeerId { self.0 } + /// Returns the `ConnectionId` which handles the bi-directional communication to that peer. pub fn connection_id(&self) -> ConnectionId { self.1 } @@ -29,6 +43,9 @@ impl Peer { impl Ord for Peer { fn cmp(&self, other: &Self) -> Ordering { + // When comparing `Peer` instances (for example to handle duplicate session requests), we + // only look at the internal `PeerId` since this is what both peers (local and remote) know + // about (the connection id might be different) self.0.cmp(&other.0) } } diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 802c090b1..edf95f5db 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -19,7 +19,7 @@ use crate::network::peers; use crate::network::swarm; use crate::network::NetworkConfiguration; -/// Network service that configures and deploys a network swarm over QUIC transports. +/// Network service that configures and deploys a libp2p network swarm over QUIC transports. /// /// The swarm listens for incoming connections, dials remote nodes, manages connections and /// executes predefined network behaviours. 
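The `Peer` ordering rule introduced in `peer.rs` above is worth a concrete illustration. The following stand-alone sketch is not part of the patch: it re-declares a simplified `Peer` against the same libp2p types to show why comparing only the `PeerId` lets both ends of a duplicate session conflict reach the same decision, even though each side sees different `ConnectionId`s.

```rust
use std::cmp::Ordering;

use libp2p::swarm::ConnectionId;
use libp2p::PeerId;

/// Simplified stand-in for the `Peer` wrapper documented above.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
struct Peer(PeerId, ConnectionId);

impl Ord for Peer {
    fn cmp(&self, other: &Self) -> Ordering {
        // Compare only the `PeerId`: both nodes know each other's peer ids,
        // while connection ids are assigned locally and differ between them
        self.0.cmp(&other.0)
    }
}

impl PartialOrd for Peer {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let remote_id = PeerId::random();

    // Two handlers for the same remote peer, e.g. after both sides dialed
    // each other simultaneously
    let via_incoming = Peer(remote_id, ConnectionId::new_unchecked(1));
    let via_outgoing = Peer(remote_id, ConnectionId::new_unchecked(2));

    // The ordering ignores the connection id, so a duplicate session request
    // arriving over either connection resolves to the same peer
    assert_eq!(via_incoming.cmp(&via_outgoing), Ordering::Equal);
}
```

Note the same asymmetry as in the patch itself: `PartialEq` and `Hash` still consider the connection id, so two `Peer` values can compare as equal under `Ord` while remaining distinct hash map keys.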
@@ -100,6 +100,7 @@ pub async fn network_service(
     Ok(())
 }
 
+/// Main loop polling the async swarm event stream and the incoming service message stream.
 struct EventLoop {
     swarm: Swarm<Behaviour>,
     tx: ServiceSender,
diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs
index 8df620328..ebf66be26 100644
--- a/aquadoggo/src/replication/service.rs
+++ b/aquadoggo/src/replication/service.rs
@@ -60,6 +60,7 @@ pub async fn replication_service(
     Ok(())
 }
 
+/// Statistics about successful and failed replication sessions for each connected peer.
 #[derive(Debug, Clone, PartialEq, Eq)]
 struct PeerStatus {
     peer: Peer,
@@ -77,6 +78,16 @@ impl PeerStatus {
     }
 }
 
+/// Coordinates peer connections and replication sessions.
+///
+/// This entails:
+///
+/// 1. Handles incoming replication and peer connection messages from other services
+/// 2. Maintains a list of currently connected p2panda peers
+/// 3. Routes messages to the right replication session with the help of the `SyncManager` and
+///    returns responses to other services
+/// 4. Schedules new replication sessions
+/// 5. Handles replication errors and informs other services about them
 struct ConnectionManager {
     /// List of peers the connection manager knows about and which are available for replication.
     peers: HashMap<Peer, PeerStatus>,
@@ -99,6 +110,7 @@ struct ConnectionManager {
 }
 
 impl ConnectionManager {
+    /// Returns a new instance of `ConnectionManager`.
     pub fn new(
         schema_provider: &SchemaProvider,
         store: &SqlStore,
@@ -132,13 +144,7 @@ impl ConnectionManager {
         TargetSet::new(&supported_schema_ids)
     }
 
-    fn remove_connection(&mut self, peer: Peer) {
-        match self.peers.remove(&peer) {
-            Some(_) => debug!("Remove peer: {}", peer.display()),
-            None => warn!("Tried to remove connection from unknown peer"),
-        }
-    }
-
+    /// Register a new peer in the network.
     async fn on_connection_established(&mut self, peer: Peer) {
         info!("Connected to peer: {}", peer.display());
 
@@ -153,6 +159,7 @@ impl ConnectionManager {
         }
     }
 
+    /// Handle a peer disconnecting from the network.
     async fn on_connection_closed(&mut self, peer: Peer) {
         info!("Disconnected from peer: {}", peer.display());
 
@@ -161,6 +168,15 @@ impl ConnectionManager {
         self.remove_connection(peer)
     }
 
+    /// Remove a peer from the network.
+    fn remove_connection(&mut self, peer: Peer) {
+        match self.peers.remove(&peer) {
+            Some(_) => debug!("Remove peer: {}", peer.display()),
+            None => warn!("Tried to remove connection from unknown peer"),
+        }
+    }
+
+    /// Route incoming replication messages to the right session.
     async fn on_replication_message(&mut self, peer: Peer, message: SyncMessage) {
         let session_id = message.session_id();
 
@@ -182,6 +198,7 @@ impl ConnectionManager {
         }
     }
 
+    /// Handle successful replication sessions.
     async fn on_replication_finished(&mut self, peer: Peer, _session_id: SessionId) {
         info!("Finished replication with peer {}", peer.display());
 
@@ -195,6 +212,7 @@ impl ConnectionManager {
         }
     }
 
+    /// Handle replication errors and inform other services about them.
async fn on_replication_error( &mut self, peer: Peer, @@ -218,28 +236,8 @@ impl ConnectionManager { self.send_service_message(ServiceMessage::ReplicationFailed(peer)); } - async fn handle_service_message(&mut self, message: ServiceMessage) { - match message { - ServiceMessage::PeerConnected(peer) => { - self.on_connection_established(peer).await; - } - ServiceMessage::PeerDisconnected(peer) => { - self.on_connection_closed(peer).await; - } - ServiceMessage::ReceivedReplicationMessage(peer, message) => { - self.on_replication_message(peer, message).await; - } - _ => (), // Ignore all other messages - } - } - - fn send_service_message(&self, message: ServiceMessage) { - if self.tx.send(message).is_err() { - // Silently fail here as we don't care if the message was received at this - // point - } - } - + /// Determine if we can attempt new replication sessions with the peers we currently know + /// about. async fn update_sessions(&mut self) { // Determine the target set our node is interested in let target_set = self.target_set().await; @@ -282,6 +280,7 @@ impl ConnectionManager { } } + /// Initiate a new replication session with remote peer. async fn initiate_replication(&mut self, peer: &Peer, target_set: &TargetSet) { match self .sync_manager @@ -301,6 +300,31 @@ impl ConnectionManager { } } + /// Handles incoming messages from other services via the bus. + async fn handle_service_message(&mut self, message: ServiceMessage) { + match message { + ServiceMessage::PeerConnected(peer) => { + self.on_connection_established(peer).await; + } + ServiceMessage::PeerDisconnected(peer) => { + self.on_connection_closed(peer).await; + } + ServiceMessage::ReceivedReplicationMessage(peer, message) => { + self.on_replication_message(peer, message).await; + } + _ => (), // Ignore all other messages + } + } + + /// Sends a message on the bus to other services. + fn send_service_message(&self, message: ServiceMessage) { + if self.tx.send(message).is_err() { + // Silently fail here as we don't care if the message was received at this + // point + } + } + + /// Main event loop running the async streams. pub async fn run(mut self) { loop { tokio::select! { From 4461a19855fbe0ff49520d44723d64cf908d75a5 Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 18 Jun 2023 16:44:39 +0200 Subject: [PATCH 120/126] Add more docs --- aquadoggo/src/network/peers/behaviour.rs | 52 +++++++++++++++++++++++- aquadoggo/src/network/peers/handler.rs | 11 +++-- 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/aquadoggo/src/network/peers/behaviour.rs b/aquadoggo/src/network/peers/behaviour.rs index 97cc89f1a..644b4a36c 100644 --- a/aquadoggo/src/network/peers/behaviour.rs +++ b/aquadoggo/src/network/peers/behaviour.rs @@ -29,8 +29,56 @@ pub enum Event { PeerDisconnected(Peer), } -/// p2panda network behaviour managing peers who can speak the p2panda protocol, handling incoming -/// and outgoing messages. +/// p2panda network behaviour managing peers who can speak the "p2panda" protocol, handling +/// incoming and outgoing messages related to it. +/// +/// This custom behaviour represents the "p2panda" protocol. As soon as both peers agree that they +/// can speak the "p2panda" protocol libp2p will upgrade the connection and enable this custom +/// `NetworkBehaviour` implementation. +/// +/// All behaviours will share the same connections but each individual behaviour maintains its own +/// connection handlers on top of them. With this in mind the following procedure takes place: +/// +/// 1. 
Swarm discovers a node and dials a new outgoing connection OR swarm listener was dialed from +/// a remote peer, establishes a new incoming connection +/// 2. Swarm negotiates if new node can speak the "p2panda" protocol. If this is the case the +/// connection gets upgraded +/// 3. Custom p2panda `NetworkBehaviour` initialises the `ConnectionHandler` for the underlying +/// connection (see `handle_established_inbound_connection` or +/// `handle_established_outbound_connection`) and informs other services about new peer +/// 4. Custom p2panda `ConnectionHandler` establishes bi-directional streams which encode and +/// decode CBOR messages for us. As soon as a new message arrives the handler informs the +/// behaviour about it +/// 5. Custom p2panda `NetworkBehaviour` receives incoming message from handler and passes it +/// further to other services +/// 6. Custom p2panda `NetworkBehaviour` receives messages from other services and passes them down +/// again to `ConnectionHandler` which sends them over the data stream to remote node +/// 7. Swarm informs `NetworkBehaviour` about closed connection handlers (gracefully or via +/// time-out). The custom p2panda `NetworkBehaviour` informs other services about disconnected +/// peer +/// +/// ```text +/// Swarm +/// ┌──────────────────────────────────────────────────────────────────┐ +/// │ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +/// │ │ Connection │ │ Connection │ │ Connection │ │ +/// │ └──────┬───────┘ └───────┬──────┘ └───────┬──────┘ │ +/// │ │ │ │ │ +/// │ Upgrade Upgrade Upgrade │ +/// │ │ │ │ │ +/// └─────────┼───────────────────────┼──────────────────────┼─────────┘ +/// │ │ │ +/// ┌───────────────┼───────────────────────┼──────────────────────┼────────────────┐ +/// │ ┌──────────┴───────────────────────┴──────────────────────┴───────────┐ │ +/// │ │ NetworkBehaviour │ │ +/// │ └──────────┬───────────────────────┬──────────────────────┬───────────┘ │ +/// │ │ │ │ │ +/// │ ┌──────────▼──────────┐ ┌──────────▼──────────┐ ┌─────────▼───────────┐ │ +/// │ │ ConnectionHandler │ │ ConnectionHandler │ │ ConnectionHandler │ │ +/// │ └─────────────────────┘ └─────────────────────┘ └─────────────────────┘ │ +/// └───────────────────────────────────────────────────────────────────────────────┘ +/// p2panda protocol +/// ``` #[derive(Debug)] pub struct Behaviour { events: VecDeque>, diff --git a/aquadoggo/src/network/peers/handler.rs b/aquadoggo/src/network/peers/handler.rs index 1878d10a9..4f7c13799 100644 --- a/aquadoggo/src/network/peers/handler.rs +++ b/aquadoggo/src/network/peers/handler.rs @@ -32,17 +32,15 @@ const IDLE_TIMEOUT: Duration = Duration::from_secs(60); /// Note that this does _not_ close the connection to the peer in general, only the p2panda /// messaging protocol. /// -/// Peers can have multiple connections to peers, even if it is the same one. This especially is -/// the case when both peers dial each other at the same time. Then we will have two connections -/// (and two handlers) for each an incoming (remote peer dialed us) and outgoing (we dialed remote -/// peer) connection. Please note that this is a special (but not unusual) case. The regular case -/// is that we will only maintain one connection (either incoming or outgoing) to a peer. +/// Usually one connection is established to one peer. Multiple connections to the same peer are +/// also possible. This especially is the case when both peers dial each other at the same time. /// /// Each connection is managed by one connection handler each. 
Inside of each connection we /// maintain a bi-directional (inbound & outbound) data stream. /// -/// The following diagram is an example of two connections to one remote peer: +/// The following diagram is an example of two connections from one local to one remote peer: /// +/// ```text /// Connection /// (Incoming) /// ┌───────────────────┐ @@ -74,6 +72,7 @@ const IDLE_TIMEOUT: Duration = Duration::from_secs(60); /// │ └─────────────┘ │ └─────────────┘ /// │ │ /// └───────────────────┘ +/// ``` pub struct Handler { /// Upgrade configuration for the protocol. listen_protocol: SubstreamProtocol, From e3daade6c57577cf50beec29e36d1960dbc4b2ad Mon Sep 17 00:00:00 2001 From: adz Date: Sun, 18 Jun 2023 19:43:32 +0200 Subject: [PATCH 121/126] Disconnect from all peers before shutdown --- aquadoggo/src/network/mod.rs | 2 + aquadoggo/src/network/service.rs | 61 +++++++++++++--- aquadoggo/src/network/shutdown.rs | 115 ++++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+), 8 deletions(-) create mode 100644 aquadoggo/src/network/shutdown.rs diff --git a/aquadoggo/src/network/mod.rs b/aquadoggo/src/network/mod.rs index 907f2a3a6..a5440ba00 100644 --- a/aquadoggo/src/network/mod.rs +++ b/aquadoggo/src/network/mod.rs @@ -5,9 +5,11 @@ mod config; pub mod identity; mod peers; mod service; +mod shutdown; mod swarm; mod transport; pub use config::NetworkConfiguration; pub use peers::Peer; pub use service::network_service; +pub use shutdown::ShutdownHandler; diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index edf95f5db..3559a5e55 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -1,10 +1,12 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use std::time::Duration; + use anyhow::Result; use libp2p::multiaddr::Protocol; use libp2p::ping::Event; use libp2p::swarm::{AddressScore, ConnectionError, SwarmEvent}; -use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr, Swarm}; +use libp2p::{autonat, identify, mdns, rendezvous, Multiaddr, PeerId, Swarm}; use log::{debug, trace, warn}; use tokio::task; use tokio_stream::wrappers::BroadcastStream; @@ -17,7 +19,7 @@ use crate::network::behaviour::{Behaviour, BehaviourEvent}; use crate::network::config::NODE_NAMESPACE; use crate::network::peers; use crate::network::swarm; -use crate::network::NetworkConfiguration; +use crate::network::{NetworkConfiguration, ShutdownHandler}; /// Network service that configures and deploys a libp2p network swarm over QUIC transports. 
/// @@ -83,8 +85,16 @@ pub async fn network_service( swarm.dial(addr)?; } + let mut shutdown_handler = ShutdownHandler::new(); + // Spawn a task to run swarm in event loop - let event_loop = EventLoop::new(swarm, tx, external_circuit_addr, network_config); + let event_loop = EventLoop::new( + swarm, + tx, + external_circuit_addr, + network_config, + shutdown_handler.clone(), + ); let handle = task::spawn(event_loop.run()); if tx_ready.send(()).is_err() { @@ -97,6 +107,8 @@ pub async fn network_service( _ = shutdown => (), } + shutdown_handler.is_done().await; + Ok(()) } @@ -107,6 +119,7 @@ struct EventLoop { rx: BroadcastStream, external_circuit_addr: Option, network_config: NetworkConfiguration, + shutdown_handler: ShutdownHandler, } impl EventLoop { @@ -115,6 +128,7 @@ impl EventLoop { tx: ServiceSender, external_circuit_addr: Option, network_config: NetworkConfiguration, + shutdown_handler: ShutdownHandler, ) -> Self { Self { swarm, @@ -122,12 +136,31 @@ impl EventLoop { tx, external_circuit_addr, network_config, + shutdown_handler, } } + /// Close all connections actively. + pub async fn shutdown(&mut self) { + let peers: Vec = self.swarm.connected_peers().copied().collect(); + + for peer_id in peers { + if self.swarm.disconnect_peer_id(peer_id).is_err() { + // Silently ignore errors when disconnecting during shutdown + } + } + + // Wait a little bit for libp2p to actually close all connections + tokio::time::sleep(Duration::from_millis(25)).await; + + self.shutdown_handler.set_done(); + } + /// Main event loop handling libp2p swarm events and incoming messages from the service bus as /// an ongoing async stream. pub async fn run(mut self) { + let mut shutdown_request_received = self.shutdown_handler.is_requested(); + loop { tokio::select! { event = self.swarm.next() => { @@ -143,6 +176,9 @@ impl EventLoop { return }, }, + _ = shutdown_request_received.next() => { + self.shutdown().await; + } } } } @@ -208,11 +244,20 @@ impl EventLoop { } SwarmEvent::ConnectionClosed { peer_id, cause, .. } => match cause { Some(ConnectionError::IO(error)) => { - if error.to_string() == "timed out" { - // Sometimes we receive time out errors from here - debug!("Connection timed out with peer {peer_id}"); - } else { - warn!("Connection error occurred with peer {peer_id}: {error}"); + // IO errors coming from libp2p are cumbersome to match, so we just convert + // them to their string representation + match error.to_string().as_str() { + "timed out" => { + debug!("Connection timed out with peer {peer_id}"); + } + "closed by peer: 0" => { + // We received an `ApplicationClose` with code 0 here which means the + // other peer actively closed the connection + debug!("Connection closed with peer {peer_id}"); + } + _ => { + warn!("Connection error occurred with peer {peer_id}: {error}"); + } } } Some(ConnectionError::KeepAliveTimeout) => { diff --git a/aquadoggo/src/network/shutdown.rs b/aquadoggo/src/network/shutdown.rs new file mode 100644 index 000000000..7475ac6b9 --- /dev/null +++ b/aquadoggo/src/network/shutdown.rs @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use std::pin::Pin; +use std::task::{Context, Poll}; + +use futures::{FutureExt, Stream}; +use triggered::{Listener, Trigger}; + +/// Helper to coordinate finishing an async process which needs to take place before we can close +/// the application. 
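+///
+/// A minimal usage sketch, distilled from the test at the bottom of this
+/// file (assumed typical wiring, the surrounding services are not shown):
+///
+/// ```text
+/// let mut handler = ShutdownHandler::new();
+///
+/// // Inside the long-running task: `is_requested()` yields once a shutdown
+/// // was requested; clean up, then confirm with `set_done()`
+/// let mut signal = handler.is_requested();
+///
+/// // In the coordinating task: trigger the shutdown request and wait until
+/// // the worker confirmed that it is done
+/// handler.is_done().await;
+/// ```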
+#[derive(Clone)] +pub struct ShutdownHandler { + request_trigger: Trigger, + request_signal: Listener, + done_trigger: Trigger, + done_signal: Listener, +} + +impl ShutdownHandler { + /// Returns a new instance of `ShutdownHandler`. + pub fn new() -> Self { + let (request_trigger, request_signal) = triggered::trigger(); + let (done_trigger, done_signal) = triggered::trigger(); + + Self { + request_trigger, + request_signal, + done_trigger, + done_signal, + } + } + + /// Returns an async stream which can be polled to find out if a shutdown request was sent. + pub fn is_requested(&self) -> ShutdownRequest { + ShutdownRequest { + inner: self.request_signal.clone(), + is_sent: false, + } + } + + /// Signal that the shutdown has completed. + pub fn set_done(&mut self) { + self.done_trigger.trigger(); + } + + /// Returns a future which can be polled to find out if the shutdown has completed. + /// + /// This automatically triggers the request to shut down when being called. + pub fn is_done(&mut self) -> Listener { + self.request_trigger.trigger(); + self.done_signal.clone() + } +} + +pub struct ShutdownRequest { + inner: Listener, + is_sent: bool, +} + +impl Stream for ShutdownRequest { + type Item = bool; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.is_sent { + return Poll::Pending; + } + + match self.inner.poll_unpin(cx) { + Poll::Ready(_) => { + self.is_sent = true; + Poll::Ready(Some(true)) + } + Poll::Pending => Poll::Pending, + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + + use tokio_stream::StreamExt; + + use super::ShutdownHandler; + + #[tokio::test] + async fn changes_value_before_shutdown() { + let num = Arc::new(AtomicUsize::new(0)); + let mut handler = ShutdownHandler::new(); + + { + let num = num.clone(); + let mut handler = handler.clone(); + + tokio::task::spawn(async move { + let mut signal = handler.is_requested(); + + loop { + tokio::select! 
{ + _ = signal.next() => { + // Change the value before we wind down + num.store(100, Ordering::Relaxed); + handler.set_done(); + } + }; + } + }); + } + + handler.is_done().await; + + assert_eq!(num.load(Ordering::Relaxed), 100); + } +} From 0215c62ba34c0f2a3a9646d590fe31617c09a3ef Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 19 Jun 2023 13:29:20 +0900 Subject: [PATCH 122/126] Dial peers by multiaddr on mdns discovery --- aquadoggo/src/network/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index 3559a5e55..744d24af0 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -313,10 +313,10 @@ impl EventLoop { // ~~~~ SwarmEvent::Behaviour(BehaviourEvent::Mdns(event)) => match event { mdns::Event::Discovered(list) => { - for (peer_id, _multiaddr) in list { + for (peer_id, multiaddr) in list { debug!("mDNS discovered a new peer: {peer_id}"); - if let Err(err) = self.swarm.dial(peer_id) { + if let Err(err) = self.swarm.dial(multiaddr) { warn!("Failed to dial: {}", err); } else { debug!("Dial success: skip remaining addresses for: {peer_id}"); From 1bcba23f3e2c1029dd6b53d96e20c1ee43bc2900 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 19 Jun 2023 15:22:29 +0900 Subject: [PATCH 123/126] Rename Naive -> LogHeight strategy --- aquadoggo/src/replication/manager.rs | 22 +++++++++---------- aquadoggo/src/replication/message.rs | 2 +- aquadoggo/src/replication/mod.rs | 2 +- aquadoggo/src/replication/mode.rs | 12 +++++----- aquadoggo/src/replication/service.rs | 4 ++-- aquadoggo/src/replication/session.rs | 8 +++---- .../strategies/{naive.rs => log_height.rs} | 8 +++---- aquadoggo/src/replication/strategies/mod.rs | 4 ++-- 8 files changed, 31 insertions(+), 31 deletions(-) rename aquadoggo/src/replication/strategies/{naive.rs => log_height.rs} (97%) diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 023dc5bc3..60f9ae1d5 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -17,7 +17,7 @@ use crate::replication::{ pub const INITIAL_SESSION_ID: SessionId = 0; -pub const SUPPORTED_MODES: [Mode; 1] = [Mode::Naive]; +pub const SUPPORTED_MODES: [Mode; 1] = [Mode::LogHeight]; pub const SUPPORT_LIVE_MODE: bool = false; @@ -547,7 +547,7 @@ mod tests { let peer_id_remote: Peer = Peer::new("remote"); test_runner(move |node: TestNode| async move { - let mode = Mode::Naive; + let mode = Mode::LogHeight; let (tx, _rx) = broadcast::channel(8); let ingest = SyncIngest::new(SchemaProvider::default(), tx); @@ -589,18 +589,18 @@ mod tests { let mut manager = SyncManager::new(node.context.store.clone(), ingest, peer_id_local); let message = - SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set_1.clone())); + SyncMessage::new(0, Message::SyncRequest(Mode::LogHeight, target_set_1.clone())); let result = manager.handle_message(&peer_id_remote, &message).await; assert!(result.is_ok()); let message = - SyncMessage::new(1, Message::SyncRequest(Mode::Naive, target_set_2.clone())); + SyncMessage::new(1, Message::SyncRequest(Mode::LogHeight, target_set_2.clone())); let result = manager.handle_message(&peer_id_remote, &message).await; assert!(result.is_ok()); // Reject attempt to create session again let message = - SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set_3.clone())); + SyncMessage::new(0, Message::SyncRequest(Mode::LogHeight, target_set_3.clone())); let result = 
manager.handle_message(&peer_id_remote, &message).await; assert!(matches!(result, Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::InboundPendingSession(0) @@ -608,7 +608,7 @@ mod tests { // Reject different session concerning same target set let message = - SyncMessage::new(2, Message::SyncRequest(Mode::Naive, target_set_2.clone())); + SyncMessage::new(2, Message::SyncRequest(Mode::LogHeight, target_set_2.clone())); let result = manager.handle_message(&peer_id_remote, &message).await; assert!(matches!( result, @@ -661,7 +661,7 @@ mod tests { let peer_id_remote: Peer = Peer::new("remote"); test_runner(move |node: TestNode| async move { - let mode = Mode::Naive; + let mode = Mode::LogHeight; let (tx, _rx) = broadcast::channel(8); let ingest = SyncIngest::new(SchemaProvider::default(), tx); @@ -862,7 +862,7 @@ mod tests { let peer_id_remote: Peer = Peer::new("remote"); test_runner(move |node: TestNode| async move { - let mode = Mode::Naive; + let mode = Mode::LogHeight; let (tx, _rx) = broadcast::channel(8); let ingest = SyncIngest::new(SchemaProvider::default(), tx); @@ -993,7 +993,7 @@ mod tests { ); let message = SyncMessage::new( INITIAL_SESSION_ID, - Message::SyncRequest(Mode::Naive, target_set.clone()), + Message::SyncRequest(Mode::LogHeight, target_set.clone()), ); let result = manager.handle_message(&peer_id_remote, &message).await; assert!(result.is_ok()); @@ -1068,7 +1068,7 @@ mod tests { // Send `SyncRequest` to remote let messages = manager_a - .initiate_session(&peer_id_remote, &target_set, &Mode::Naive) + .initiate_session(&peer_id_remote, &target_set, &Mode::LogHeight) .await .unwrap(); @@ -1076,7 +1076,7 @@ mod tests { messages, vec![SyncMessage::new( 0, - Message::SyncRequest(Mode::Naive, target_set.clone()) + Message::SyncRequest(Mode::LogHeight, target_set.clone()) )] ); diff --git a/aquadoggo/src/replication/message.rs b/aquadoggo/src/replication/message.rs index c88c9c23f..1a39922a7 100644 --- a/aquadoggo/src/replication/message.rs +++ b/aquadoggo/src/replication/message.rs @@ -241,7 +241,7 @@ mod tests { assert_eq!( deserialize_into::(&serialize_value(cbor!([0, 12, 0, target_set]))) .unwrap(), - SyncMessage::new(12, Message::SyncRequest(Mode::Naive, target_set.clone())) + SyncMessage::new(12, Message::SyncRequest(Mode::LogHeight, target_set.clone())) ); let log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = vec![]; diff --git a/aquadoggo/src/replication/mod.rs b/aquadoggo/src/replication/mod.rs index e3584fedc..77f562995 100644 --- a/aquadoggo/src/replication/mod.rs +++ b/aquadoggo/src/replication/mod.rs @@ -17,5 +17,5 @@ pub use message::{LiveMode, LogHeight, Message, SyncMessage}; pub use mode::Mode; pub use service::replication_service; pub use session::{Session, SessionId, SessionState}; -pub use strategies::{NaiveStrategy, SetReconciliationStrategy, StrategyResult}; +pub use strategies::{LogHeightStrategy, SetReconciliationStrategy, StrategyResult}; pub use target_set::TargetSet; diff --git a/aquadoggo/src/replication/mode.rs b/aquadoggo/src/replication/mode.rs index c592c83ff..3d73f96fe 100644 --- a/aquadoggo/src/replication/mode.rs +++ b/aquadoggo/src/replication/mode.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[derive(Clone, Debug, Eq, PartialEq)] pub enum Mode { - Naive, + LogHeight, SetReconciliation, Unknown, } @@ -15,7 +15,7 @@ pub enum Mode { impl Mode { pub fn as_str(&self) -> &str { match self { - Mode::Naive => "naive", + Mode::LogHeight => "log-height", Mode::SetReconciliation 
=> "set-reconciliation", Mode::Unknown => "unknown", } @@ -23,7 +23,7 @@ impl Mode { pub fn as_u64(&self) -> u64 { match self { - Mode::Naive => 0, + Mode::LogHeight => 0, Mode::SetReconciliation => 1, Mode::Unknown => unreachable!("Can't create an unknown replication mode"), } @@ -33,7 +33,7 @@ impl Mode { impl From for Mode { fn from(value: u64) -> Self { match value { - 0 => Mode::Naive, + 0 => Mode::LogHeight, 1 => Mode::SetReconciliation, _ => Mode::Unknown, } @@ -81,13 +81,13 @@ mod tests { #[test] fn u64_representation() { - assert_eq!(Mode::Naive.as_u64(), 0); + assert_eq!(Mode::LogHeight.as_u64(), 0); assert_eq!(Mode::SetReconciliation.as_u64(), 1); } #[test] fn serialize() { - let bytes = serialize_from(Mode::Naive); + let bytes = serialize_from(Mode::LogHeight); assert_eq!(bytes, vec![0]); } diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index ebf66be26..c3acee757 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -284,7 +284,7 @@ impl ConnectionManager { async fn initiate_replication(&mut self, peer: &Peer, target_set: &TargetSet) { match self .sync_manager - .initiate_session(peer, target_set, &Mode::Naive) + .initiate_session(peer, target_set, &Mode::LogHeight) .await { Ok(messages) => { @@ -400,7 +400,7 @@ mod tests { rx.recv().await, Ok(ServiceMessage::SentReplicationMessage( remote_peer, - SyncMessage::new(0, Message::SyncRequest(Mode::Naive, target_set)) + SyncMessage::new(0, Message::SyncRequest(Mode::LogHeight, target_set)) )) ); assert_eq!(manager.sync_manager.get_sessions(&remote_peer).len(), 1); diff --git a/aquadoggo/src/replication/session.rs b/aquadoggo/src/replication/session.rs index 4387d5895..e5931bf10 100644 --- a/aquadoggo/src/replication/session.rs +++ b/aquadoggo/src/replication/session.rs @@ -9,7 +9,7 @@ use crate::db::SqlStore; use crate::replication::errors::ReplicationError; use crate::replication::traits::Strategy; use crate::replication::{ - Message, Mode, NaiveStrategy, SetReconciliationStrategy, StrategyResult, TargetSet, + Message, Mode, LogHeightStrategy, SetReconciliationStrategy, StrategyResult, TargetSet, }; pub type SessionId = u64; @@ -57,7 +57,7 @@ impl Session { live_mode: bool, ) -> Self { let strategy: Box = match mode { - Mode::Naive => Box::new(NaiveStrategy::new(target_set)), + Mode::LogHeight => Box::new(LogHeightStrategy::new(target_set)), Mode::SetReconciliation => Box::new(SetReconciliationStrategy::new()), Mode::Unknown => panic!("Unknown replication mode"), }; @@ -188,7 +188,7 @@ mod tests { fn state_machine(#[from(random_target_set)] target_set: TargetSet) { test_runner(move |node: TestNode| async move { let mut session = - Session::new(&INITIAL_SESSION_ID, &target_set, &Mode::Naive, true, false); + Session::new(&INITIAL_SESSION_ID, &target_set, &Mode::LogHeight, true, false); assert!(!session.is_local_done); assert!(!session.is_local_live_mode); assert!(!session.is_remote_live_mode); @@ -218,7 +218,7 @@ mod tests { let target_set = TargetSet::new(&vec![config.schema.id().to_owned()]); let mut session = - Session::new(&INITIAL_SESSION_ID, &target_set, &Mode::Naive, true, false); + Session::new(&INITIAL_SESSION_ID, &target_set, &Mode::LogHeight, true, false); let response_messages = session .handle_message(&node.context.store, &Message::Have(vec![])) diff --git a/aquadoggo/src/replication/strategies/naive.rs b/aquadoggo/src/replication/strategies/log_height.rs similarity index 97% rename from aquadoggo/src/replication/strategies/naive.rs 
rename to aquadoggo/src/replication/strategies/log_height.rs index 638b86b5c..b8e4db506 100644 --- a/aquadoggo/src/replication/strategies/naive.rs +++ b/aquadoggo/src/replication/strategies/log_height.rs @@ -17,13 +17,13 @@ use crate::replication::traits::Strategy; use crate::replication::{LogHeight, Message, Mode, StrategyResult, TargetSet}; #[derive(Clone, Debug)] -pub struct NaiveStrategy { +pub struct LogHeightStrategy { target_set: TargetSet, received_remote_have: bool, sent_have: bool, } -impl NaiveStrategy { +impl LogHeightStrategy { pub fn new(target_set: &TargetSet) -> Self { Self { target_set: target_set.clone(), @@ -97,9 +97,9 @@ impl NaiveStrategy { } #[async_trait] -impl Strategy for NaiveStrategy { +impl Strategy for LogHeightStrategy { fn mode(&self) -> Mode { - Mode::Naive + Mode::LogHeight } fn target_set(&self) -> TargetSet { diff --git a/aquadoggo/src/replication/strategies/mod.rs b/aquadoggo/src/replication/strategies/mod.rs index c509580fc..705726c90 100644 --- a/aquadoggo/src/replication/strategies/mod.rs +++ b/aquadoggo/src/replication/strategies/mod.rs @@ -1,11 +1,11 @@ // SPDX-License-Identifier: AGPL-3.0-or-later mod diff; -mod naive; +mod log_height; mod set_reconciliation; pub use diff::diff_log_heights; -pub use naive::NaiveStrategy; +pub use log_height::LogHeightStrategy; pub use set_reconciliation::SetReconciliationStrategy; use crate::replication::Message; From 9091145dc350536ee16755932377d964a5956434 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Mon, 19 Jun 2023 15:47:05 +0900 Subject: [PATCH 124/126] Naming improvement --- aquadoggo/src/replication/message.rs | 4 ++-- aquadoggo/src/replication/mod.rs | 2 +- aquadoggo/src/replication/strategies/log_height.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aquadoggo/src/replication/message.rs b/aquadoggo/src/replication/message.rs index 1a39922a7..7ac3a8147 100644 --- a/aquadoggo/src/replication/message.rs +++ b/aquadoggo/src/replication/message.rs @@ -22,14 +22,14 @@ pub type MessageType = u64; pub type LiveMode = bool; -pub type LogHeight = (PublicKey, Vec<(LogId, SeqNum)>); +pub type LogHeights = (PublicKey, Vec<(LogId, SeqNum)>); #[derive(Debug, Clone, Eq, PartialEq)] pub enum Message { SyncRequest(Mode, TargetSet), Entry(EncodedEntry, Option), SyncDone(LiveMode), - Have(Vec), + Have(Vec), } impl Message { diff --git a/aquadoggo/src/replication/mod.rs b/aquadoggo/src/replication/mod.rs index 77f562995..599a64fb3 100644 --- a/aquadoggo/src/replication/mod.rs +++ b/aquadoggo/src/replication/mod.rs @@ -13,7 +13,7 @@ pub mod traits; pub use ingest::SyncIngest; pub use manager::SyncManager; -pub use message::{LiveMode, LogHeight, Message, SyncMessage}; +pub use message::{LiveMode, LogHeights, Message, SyncMessage}; pub use mode::Mode; pub use service::replication_service; pub use session::{Session, SessionId, SessionState}; diff --git a/aquadoggo/src/replication/strategies/log_height.rs b/aquadoggo/src/replication/strategies/log_height.rs index b8e4db506..8493712e3 100644 --- a/aquadoggo/src/replication/strategies/log_height.rs +++ b/aquadoggo/src/replication/strategies/log_height.rs @@ -14,7 +14,7 @@ use crate::db::SqlStore; use crate::replication::errors::ReplicationError; use crate::replication::strategies::diff_log_heights; use crate::replication::traits::Strategy; -use crate::replication::{LogHeight, Message, Mode, StrategyResult, TargetSet}; +use crate::replication::{LogHeights, Message, Mode, StrategyResult, TargetSet}; #[derive(Clone, Debug)] pub struct LogHeightStrategy { @@ 
-60,7 +60,7 @@ impl LogHeightStrategy {
     async fn entry_responses(
         &self,
         store: &SqlStore,
-        remote_log_heights: &[LogHeight],
+        remote_log_heights: &[LogHeights],
     ) -> Vec<Message> {
         let mut messages = Vec::new();
 

From d6ea4f4d1126dbebabb470f7a6ab444c48f7d216 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Mon, 19 Jun 2023 15:47:12 +0900
Subject: [PATCH 125/126] Doc strings

---
 aquadoggo/src/replication/strategies/diff.rs | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/aquadoggo/src/replication/strategies/diff.rs b/aquadoggo/src/replication/strategies/diff.rs
index c90e5c088..f3c7a39cc 100644
--- a/aquadoggo/src/replication/strategies/diff.rs
+++ b/aquadoggo/src/replication/strategies/diff.rs
@@ -7,8 +7,9 @@ use p2panda_rs::entry::{LogId, SeqNum};
 use p2panda_rs::identity::PublicKey;
 use p2panda_rs::Human;
 
-use crate::replication::LogHeight;
-
+/// Compare a remote's log heights against our own and calculate which (if any) entries it is
+/// missing. The returned tuple signifies the sequence number of a log from which the remote is
+/// missing entries.
 fn remote_requires_entries(
     log_id: &LogId,
     local_seq_num: &SeqNum,
@@ -54,10 +55,16 @@ fn remote_requires_entries(
     }
 }
 
+/// Diff a set of local and remote log heights in order to calculate which, if any, entries the
+/// remote is missing.
+///
+/// The returned list contains the sequence number in every log for every author from which the
+/// remote is missing entries. Sending all entries the local node has stored, from the returned
+/// sequence number onwards, will bring the remote node up-to-date with us.
 pub fn diff_log_heights(
     local_log_heights: &HashMap<PublicKey, Vec<(LogId, SeqNum)>>,
     remote_log_heights: &HashMap<PublicKey, Vec<(LogId, SeqNum)>>,
-) -> Vec<LogHeight> {
+) -> Vec<(PublicKey, Vec<(LogId, SeqNum)>)> {
     let mut remote_needs = Vec::new();
 
     for (local_author, local_author_logs) in local_log_heights {

From 462c4ffbc798d5145e617e063a3dae4a2fd0f868 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Mon, 19 Jun 2023 15:48:50 +0900
Subject: [PATCH 126/126] fmt

---
 aquadoggo/src/replication/manager.rs | 24 ++++++++++++++++--------
 aquadoggo/src/replication/message.rs | 5 ++++-
 aquadoggo/src/replication/session.rs | 20 +++++++++++++++-----
 3 files changed, 35 insertions(+), 14 deletions(-)

diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs
index 60f9ae1d5..67611555b 100644
--- a/aquadoggo/src/replication/manager.rs
+++ b/aquadoggo/src/replication/manager.rs
@@ -588,27 +588,35 @@ mod tests {
 
             let mut manager = SyncManager::new(node.context.store.clone(), ingest, peer_id_local);
 
-            let message =
-                SyncMessage::new(0, Message::SyncRequest(Mode::LogHeight, target_set_1.clone()));
+            let message = SyncMessage::new(
+                0,
+                Message::SyncRequest(Mode::LogHeight, target_set_1.clone()),
+            );
             let result = manager.handle_message(&peer_id_remote, &message).await;
             assert!(result.is_ok());
 
-            let message =
-                SyncMessage::new(1, Message::SyncRequest(Mode::LogHeight, target_set_2.clone()));
+            let message = SyncMessage::new(
+                1,
+                Message::SyncRequest(Mode::LogHeight, target_set_2.clone()),
+            );
             let result = manager.handle_message(&peer_id_remote, &message).await;
             assert!(result.is_ok());
 
             // Reject attempt to create session again
-            let message =
-                SyncMessage::new(0, Message::SyncRequest(Mode::LogHeight, target_set_3.clone()));
+            let message = SyncMessage::new(
+                0,
+                Message::SyncRequest(Mode::LogHeight, target_set_3.clone()),
+            );
             let result = manager.handle_message(&peer_id_remote, &message).await;
             assert!(matches!(result,
Err(ReplicationError::DuplicateSession(err)) if err == DuplicateSessionRequestError::InboundPendingSession(0) )); // Reject different session concerning same target set - let message = - SyncMessage::new(2, Message::SyncRequest(Mode::LogHeight, target_set_2.clone())); + let message = SyncMessage::new( + 2, + Message::SyncRequest(Mode::LogHeight, target_set_2.clone()), + ); let result = manager.handle_message(&peer_id_remote, &message).await; assert!(matches!( result, diff --git a/aquadoggo/src/replication/message.rs b/aquadoggo/src/replication/message.rs index 7ac3a8147..73da83871 100644 --- a/aquadoggo/src/replication/message.rs +++ b/aquadoggo/src/replication/message.rs @@ -241,7 +241,10 @@ mod tests { assert_eq!( deserialize_into::(&serialize_value(cbor!([0, 12, 0, target_set]))) .unwrap(), - SyncMessage::new(12, Message::SyncRequest(Mode::LogHeight, target_set.clone())) + SyncMessage::new( + 12, + Message::SyncRequest(Mode::LogHeight, target_set.clone()) + ) ); let log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = vec![]; diff --git a/aquadoggo/src/replication/session.rs b/aquadoggo/src/replication/session.rs index e5931bf10..2a59f5bee 100644 --- a/aquadoggo/src/replication/session.rs +++ b/aquadoggo/src/replication/session.rs @@ -9,7 +9,7 @@ use crate::db::SqlStore; use crate::replication::errors::ReplicationError; use crate::replication::traits::Strategy; use crate::replication::{ - Message, Mode, LogHeightStrategy, SetReconciliationStrategy, StrategyResult, TargetSet, + LogHeightStrategy, Message, Mode, SetReconciliationStrategy, StrategyResult, TargetSet, }; pub type SessionId = u64; @@ -187,8 +187,13 @@ mod tests { #[rstest] fn state_machine(#[from(random_target_set)] target_set: TargetSet) { test_runner(move |node: TestNode| async move { - let mut session = - Session::new(&INITIAL_SESSION_ID, &target_set, &Mode::LogHeight, true, false); + let mut session = Session::new( + &INITIAL_SESSION_ID, + &target_set, + &Mode::LogHeight, + true, + false, + ); assert!(!session.is_local_done); assert!(!session.is_local_live_mode); assert!(!session.is_remote_live_mode); @@ -217,8 +222,13 @@ mod tests { populate_store(&node.context.store, &config).await; let target_set = TargetSet::new(&vec![config.schema.id().to_owned()]); - let mut session = - Session::new(&INITIAL_SESSION_ID, &target_set, &Mode::LogHeight, true, false); + let mut session = Session::new( + &INITIAL_SESSION_ID, + &target_set, + &Mode::LogHeight, + true, + false, + ); let response_messages = session .handle_message(&node.context.store, &Message::Have(vec![]))
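The doc-strings added to `diff.rs` in patch 125 describe the diffing contract only in prose; a small worked example makes it concrete. The following stand-alone sketch is not the aquadoggo implementation: it re-implements the described rule against the same `p2panda_rs` types, under the assumption (implied by the doc-strings) that the returned sequence number is the first entry the remote is missing.

```rust
use std::collections::HashMap;

use p2panda_rs::entry::{LogId, SeqNum};
use p2panda_rs::identity::{KeyPair, PublicKey};

/// Illustrative re-implementation of the rule described by the new
/// `diff_log_heights` doc-strings: for every author and log we know locally,
/// report the first sequence number the remote is missing.
fn remote_needs(
    local: &HashMap<PublicKey, Vec<(LogId, SeqNum)>>,
    remote: &HashMap<PublicKey, Vec<(LogId, SeqNum)>>,
) -> Vec<(PublicKey, Vec<(LogId, SeqNum)>)> {
    let mut result = Vec::new();

    for (author, local_logs) in local {
        let remote_logs = remote.get(author);
        let mut missing = Vec::new();

        for (log_id, local_seq) in local_logs {
            // Height the remote reported for this log, zero if it reported
            // nothing at all
            let remote_height = remote_logs
                .and_then(|logs| logs.iter().find(|(id, _)| id == log_id))
                .map_or(0, |(_, seq)| seq.as_u64());

            // The remote is behind: it needs every entry after its height
            if local_seq.as_u64() > remote_height {
                missing.push((log_id.to_owned(), SeqNum::new(remote_height + 1).unwrap()));
            }
        }

        if !missing.is_empty() {
            result.push((author.to_owned(), missing));
        }
    }

    result
}

fn main() {
    let author = KeyPair::new().public_key().to_owned();

    // We hold entries up to seq num 8 in log 0, the remote reported 5
    let local = HashMap::from([(author.clone(), vec![(LogId::new(0), SeqNum::new(8).unwrap())])]);
    let remote = HashMap::from([(author.clone(), vec![(LogId::new(0), SeqNum::new(5).unwrap())])]);

    // Sending entries 6, 7 and 8 would bring the remote up-to-date
    assert_eq!(
        remote_needs(&local, &remote),
        vec![(author, vec![(LogId::new(0), SeqNum::new(6).unwrap())])]
    );
}
```

The real `diff_log_heights` also has to handle logs only the remote knows about and authors missing entirely on one side; this sketch covers just the happy path the doc-strings describe.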