This repository has been archived by the owner on Oct 23, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 163
Initial content discovery #260
Merged
Merged
Changes from all commits
Commits
Show all changes
6 commits
Select commit
Hold shift + click to select a range
10974f8
feat: enable content discovery features
ljedrz 4939067
feat: rename the variables in the kad test, add a new content discove…
ljedrz e98e86b
fix: set up the kad protocol in the content discovery test
ljedrz 443bfad
fix: remove a stray clone
ljedrz d090810
fix: don't initialize the global test logger twice
ljedrz a4be9cd
refactor: change the ipfs.docs Cid to the logo one
ljedrz File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,51 +1,99 @@ | ||
use ipfs::Node; | ||
use libp2p::PeerId; | ||
use log::LevelFilter; | ||
use async_std::future::timeout; | ||
use cid::Cid; | ||
use ipfs::{IpfsOptions, Node}; | ||
use libp2p::{Multiaddr, PeerId}; | ||
use log::{LevelFilter, SetLoggerError}; | ||
use std::time::Duration; | ||
|
||
const PEER_COUNT: usize = 20; | ||
|
||
#[async_std::test] | ||
async fn kademlia() { | ||
let _ = env_logger::builder() | ||
fn init_test_logging() -> Result<(), SetLoggerError> { | ||
env_logger::builder() | ||
.is_test(true) | ||
.filter(Some("async_std"), LevelFilter::Error) | ||
.init(); | ||
.try_init() | ||
} | ||
|
||
#[async_std::test] | ||
async fn kademlia_local_peer_discovery() { | ||
const BOOTSTRAPPER_COUNT: usize = 20; | ||
|
||
// set up logging | ||
let _ = init_test_logging(); | ||
|
||
// start up PEER_COUNT bootstrapper nodes | ||
let mut nodes = Vec::with_capacity(PEER_COUNT); | ||
for _ in 0..PEER_COUNT { | ||
nodes.push(Node::new().await); | ||
let mut bootstrappers = Vec::with_capacity(BOOTSTRAPPER_COUNT); | ||
for _ in 0..BOOTSTRAPPER_COUNT { | ||
bootstrappers.push(Node::new().await); | ||
} | ||
|
||
// register the bootstrappers' ids and addresses | ||
let mut peers = Vec::with_capacity(PEER_COUNT); | ||
for node in &nodes { | ||
let (id, addrs) = node.identity().await.unwrap(); | ||
let mut bootstrapper_ids = Vec::with_capacity(BOOTSTRAPPER_COUNT); | ||
for bootstrapper in &bootstrappers { | ||
let (id, addrs) = bootstrapper.identity().await.unwrap(); | ||
let id = PeerId::from_public_key(id); | ||
|
||
peers.push((id, addrs)); | ||
bootstrapper_ids.push((id, addrs)); | ||
} | ||
|
||
// connect all the bootstrappers to one another | ||
for (i, (node_id, _)) in peers.iter().enumerate() { | ||
for (peer_id, addrs) in peers.iter().filter(|(peer_id, _)| peer_id != node_id) { | ||
nodes[i] | ||
.add_peer(peer_id.clone(), addrs[0].clone()) | ||
for (i, (node_id, _)) in bootstrapper_ids.iter().enumerate() { | ||
for (bootstrapper_id, addrs) in bootstrapper_ids | ||
.iter() | ||
.filter(|(peer_id, _)| peer_id != node_id) | ||
{ | ||
bootstrappers[i] | ||
.add_peer(bootstrapper_id.clone(), addrs[0].clone()) | ||
.await | ||
.unwrap(); | ||
} | ||
} | ||
|
||
// introduce an extra peer and connect it to one of the bootstrappers | ||
let extra_peer = Node::new().await; | ||
assert!(extra_peer | ||
.add_peer(peers[0].0.clone(), peers[0].1[0].clone()) | ||
// introduce a peer and connect it to one of the bootstrappers | ||
let peer = Node::new().await; | ||
assert!(peer | ||
.add_peer( | ||
bootstrapper_ids[0].0.clone(), | ||
bootstrapper_ids[0].1[0].clone() | ||
) | ||
.await | ||
.is_ok()); | ||
|
||
// check that kad::bootstrap works | ||
assert!(peer.bootstrap().await.is_ok()); | ||
|
||
// check that kad::get_closest_peers works | ||
assert!(peer.get_closest_peers().await.is_ok()); | ||
} | ||
|
||
#[async_std::test] | ||
async fn kademlia_popular_content_discovery() { | ||
// set up logging | ||
let _ = init_test_logging(); | ||
|
||
let (bootstrapper_id, bootstrapper_addr): (PeerId, Multiaddr) = ( | ||
"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" | ||
.parse() | ||
.unwrap(), | ||
"/ip4/104.131.131.82/tcp/4001".parse().unwrap(), | ||
); | ||
|
||
// introduce a peer and specify the Kademlia protocol to it | ||
// without a specified protocol, the test will not complete | ||
let mut opts = IpfsOptions::inmemory_with_generated_keys(); | ||
opts.kad_protocol = Some("/ipfs/lan/kad/1.0.0".to_owned()); | ||
let peer = Node::with_options(opts).await; | ||
|
||
// connect it to one of the well-known bootstrappers | ||
assert!(peer | ||
.add_peer(bootstrapper_id, bootstrapper_addr) | ||
.await | ||
.is_ok()); | ||
|
||
// call kad::bootstrap | ||
assert!(extra_peer.bootstrap().await.is_ok()); | ||
// the Cid of the IPFS logo | ||
let cid: Cid = "bafkreicncneocapbypwwe3gl47bzvr3pkpxmmobzn7zr2iaz67df4kjeiq" | ||
.parse() | ||
.unwrap(); | ||
|
||
// call kad::get_closest_peers | ||
assert!(nodes[0].get_closest_peers().await.is_ok()); | ||
assert!(timeout(Duration::from_secs(10), peer.get_block(&cid)) | ||
.await | ||
.is_ok()); | ||
} |
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I am not 100% with you on this FIXME but no need to change it now. To elaborate:
"Not 100% with you" as in: saving blocks should be as decoupled as possible, and providing should only happen for some subset of blocks — possibly only the pinned roots, or the pinned roots and their subroots, or anything more complex would need to be provided for. Where "subroots" would be the roots of the individual files within a pinned larger directory structure.
Providing and put_block should be decoupled because providing can be quite expensive, and we might not want to do them concurrently at all. Additionally, I'm not sure it's a good idea to plan on using start_providing unless we can come up with a really nice backend for the kademlia store. Otherwise we could just use the simpler "push" API and handle the timing ourselves.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fair enough 👍