diff --git a/.github/workflows/report_test_flakes.yaml b/.github/workflows/report_test_flakes.yaml new file mode 100644 index 000000000..5c42af567 --- /dev/null +++ b/.github/workflows/report_test_flakes.yaml @@ -0,0 +1,125 @@ +on: + workflow_call: + +name: 'Workflow Analysis' + +jobs: + report-test-flakes: + name: 'Report test flakes' + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - uses: actions/download-artifact@v3 + - name: Parse test results + id: parse-test-results + run: | + sudo apt-get install -y colorized-logs + + echo -n 'results=[' >> $GITHUB_OUTPUT + + ENTRIES="" + + for RESULTS_DIR in test-results-*/ ; do + mapfile -t target < "$RESULTS_DIR/target" + + PLATFORM="${target[0]}" + FEATURES="${target[1]}" + TOOLCHAIN="${target[2]}" + + LOG_PATH="$RESULTS_DIR/log" + csplit -q "$LOG_PATH" %^------------% + + SUMMARY="" + + if [[ -f "./xx00" ]]; then + SUMMARY=$(tail ./xx00 -n+2 | ansi2txt | jq -M --compact-output --raw-input --slurp . | sed -e 's/\\/\\\\/g') + else + continue + fi + + ENTRY="{\"platform\":\"$PLATFORM\",\"features\":\"$FEATURES\",\"toolchain\":\"$TOOLCHAIN\",\"summary\":$SUMMARY}" + + if [ -z "$ENTRIES" ]; then + ENTRIES="$ENTRY" + else + ENTRIES="$ENTRIES,$ENTRY" + fi + + done + + echo -n "$ENTRIES ]" >> $GITHUB_OUTPUT + - name: Report test flakes + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const results = JSON.parse(`${{ steps.parse-test-results.outputs.results }}`); + + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + }); + + const testFlakeAnalysisHeader = 'Test flake analysis'; + const existingComment = comments.find(comment => { + return comment.user.type === 'Bot' && comment.body.includes(testFlakeAnalysisHeader) + }); + + let body = ''; + + if (results.length === 0) { + body = "No test results to analyze. Maybe none of the test runs passed?"; + } else { + + let table = "\n\n| status | platform | features | toolchain |\n|:---:|---|---|---|\n"; + + const flakeSummaries = []; + + for (const result of results) { + const isFlakey = result.summary.includes("FLAKY"); + + table += `| ${ isFlakey ? "🟡" : "🟢" } | \`${ result.platform }\` | \`${ result.features }\` | \`${ result.toolchain }\` |\n`; + + if (isFlakey) { + flakeSummaries.push(`#### Flake summary for \`${ result.platform }\`, \`${ result.features }\`, \`${ result.toolchain }\` + + \`\`\`shell + ${ result.summary } + \`\`\``); + } + } + + if (flakeSummaries.length === 0) { + body += '\nNo flakes detected 🎉\n\n'; + } + + body += table; + + if (flakeSummaries.length > 0) { + body += "\n\n"; + body += flakeSummaries.join('\n\n'); + } + } + + body = `### ${testFlakeAnalysisHeader} + + ${body}`; + + if (existingComment) { + github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existingComment.id, + body + }); + } else { + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body + }); + } diff --git a/.github/workflows/run_test_suite.yaml b/.github/workflows/run_test_suite.yaml index f6311545e..452b5a7b5 100644 --- a/.github/workflows/run_test_suite.yaml +++ b/.github/workflows/run_test_suite.yaml @@ -37,28 +37,6 @@ jobs: swift build --sanitize=address swift test --sanitize=address - run-test-suite-windows: - runs-on: windows-latest - steps: - - uses: actions/checkout@v3 - - uses: Swatinem/rust-cache@v2 - - name: 'Setup Rust' - run: | - curl -sSf https://sh.rustup.rs | sh -s -- -y - - name: 'Install environment packages' - run: | - choco install -y cmake protoc openssl - shell: sh - - name: 'Install IPFS Kubo' - uses: ibnesayeed/setup-ipfs@master - with: - ipfs_version: v0.17.0 - run_daemon: true - - name: 'Run Rust native target tests' - run: cargo test --features test-kubo,helpers - env: - NOOSPHERE_LOG: deafening - run-linting-linux: runs-on: ubuntu-latest steps: @@ -78,15 +56,31 @@ jobs: - name: 'Run Linter' run: cargo clippy --all -- -D warnings - run-test-suite-linux: - runs-on: ubuntu-latest + run-rust-test-suite: + name: 'Run Rust test suite' + strategy: + matrix: + features: ['test-kubo,headers', 'test-kubo,headers,rocksdb'] + platform: ['ubuntu-latest', 'windows-latest', 'macos-13'] + toolchain: ['stable'] + exclude: + - platform: 'windows-latest' + features: 'test-kubo,headers,rocksdb' + runs-on: ${{ matrix.platform }} steps: - uses: actions/checkout@v3 - uses: Swatinem/rust-cache@v2 - name: 'Setup Rust' run: | curl -sSf https://sh.rustup.rs | sh -s -- -y - - name: 'Install environment packages' + rustup toolchain install ${{ matrix.toolchain }} + - name: 'Install environment packages (Windows)' + if: ${{ matrix.platform == 'windows-latest' }} + run: | + choco install -y cmake protoc openssl + shell: sh + - name: 'Install environment packages (Linux)' + if: ${{ matrix.platform == 'ubuntu-latest' }} run: | sudo apt-get update -qqy sudo apt-get install jq
protobuf-compiler cmake libclang-dev - - name: 'Install IPFS Kubo' - uses: ibnesayeed/setup-ipfs@master + mkdir -p test-results + + echo "${{ matrix.platform }} + ${{ matrix.features }} + ${{ matrix.toolchain }}" > test-results/target + + FEATURES="" + + if [ -n "${{ matrix.features }}" ]; then + FEATURES="--features ${{ matrix.features }}" + fi + + cargo +${{ matrix.toolchain }} nextest run $FEATURES --retries 5 --color always 2>&1 | tee test-results/log + env: + NOOSPHERE_LOG: academic + - uses: actions/upload-artifact@v3 with: - ipfs_version: v0.17.0 - run_daemon: true - - name: 'Run Rust native target tests (RocksDB)' - run: NOOSPHERE_LOG=defeaning cargo test -p noosphere -p noosphere-storage --features rocksdb,test-kubo + name: test-results-${{ hashFiles('./test-results') }} + path: ./test-results + + report-test-flakes: + name: 'Report test flakes (Linux)' + needs: ['run-rust-test-suite'] + if: always() + uses: ./.github/workflows/report_test_flakes.yaml + secrets: inherit run-test-suite-linux-c: runs-on: ubuntu-latest diff --git a/images/orb/Dockerfile b/images/orb/Dockerfile index 183aeb814..6763a87b4 100644 --- a/images/orb/Dockerfile +++ b/images/orb/Dockerfile @@ -13,7 +13,7 @@ FROM ubuntu:latest RUN mkdir -p /root/.noosphere RUN mkdir -p /root/sphere -ENV RUST_LOG="info,tower_http,noosphere,noosphere_cli,noosphere_ipfs,noosphere_storage,noosphere_core,noosphere_storage,noosphere_api,orb=debug" +ENV NOOSPHERE_LOG="deafening" VOLUME ["/root/.noosphere", "/root/sphere"] EXPOSE 4433 diff --git a/rust/noosphere-core/src/api/client.rs b/rust/noosphere-core/src/api/client.rs index fdf208d0c..91ac5c780 100644 --- a/rust/noosphere-core/src/api/client.rs +++ b/rust/noosphere-core/src/api/client.rs @@ -254,7 +254,10 @@ where .stream() .map(|block| match block { Ok(block) => Ok(block), - Err(error) => Err(anyhow!(error)), + Err(error) => { + warn!("Replication stream ended prematurely"); + Err(anyhow!(error)) + } }), ) } diff --git a/rust/noosphere-core/src/stream/memo.rs b/rust/noosphere-core/src/stream/memo.rs index 3471c9666..c2cee53e6 100644 --- a/rust/noosphere-core/src/stream/memo.rs +++ b/rust/noosphere-core/src/stream/memo.rs @@ -82,7 +82,7 @@ where }; if replicate_authority { - debug!("Replicating authority..."); + trace!("Replicating authority..."); let authority = sphere.get_authority().await?; let store = store.clone(); @@ -105,7 +105,7 @@ } if replicate_address_book { - debug!("Replicating address book..."); + trace!("Replicating address book..."); let address_book = sphere.get_address_book().await?; let identities = address_book.get_identities().await?; @@ -121,7 +121,7 @@ } if replicate_content { - debug!("Replicating content..."); + trace!("Replicating content..."); let content = sphere.get_content().await?; tasks.spawn(walk_versioned_map_changes_and(content, store.clone(), move |_, link, store| async move { diff --git a/rust/noosphere-core/src/view/content.rs b/rust/noosphere-core/src/view/content.rs index cfc13029c..96eb683d0 100644 --- a/rust/noosphere-core/src/view/content.rs +++ b/rust/noosphere-core/src/view/content.rs @@ -17,7 +17,7 @@ impl<'a, 'b, S: BlockStore> BodyChunkDecoder<'a, 'b, S> { let store = self.1.clone(); Box::pin(try_stream!
{ while let Some(cid) = next { - debug!("Unpacking block {}...", cid); + trace!("Unpacking block {}...", cid); let chunk = store.load::(&cid).await.map_err(|error| { std::io::Error::new(std::io::ErrorKind::UnexpectedEof, error.to_string()) })?; diff --git a/rust/noosphere-gateway/src/gateway.rs b/rust/noosphere-gateway/src/gateway.rs index 0b1f2665a..57e7f8145 100644 --- a/rust/noosphere-gateway/src/gateway.rs +++ b/rust/noosphere-gateway/src/gateway.rs @@ -78,7 +78,8 @@ where let ipfs_client = KuboClient::new(&ipfs_api)?; - let (syndication_tx, syndication_task) = start_ipfs_syndication::(ipfs_api.clone()); + let (syndication_tx, syndication_task) = + start_ipfs_syndication::(ipfs_api.clone(), vec![sphere_context.clone()]); let (name_system_tx, name_system_task) = start_name_system::( NameSystemConfiguration { connection_type: NameSystemConnectionType::Remote(name_resolver_api), diff --git a/rust/noosphere-gateway/src/handlers/v0alpha2/push.rs b/rust/noosphere-gateway/src/handlers/v0alpha2/push.rs index 06fdc442d..f408ea5b9 100644 --- a/rust/noosphere-gateway/src/handlers/v0alpha2/push.rs +++ b/rust/noosphere-gateway/src/handlers/v0alpha2/push.rs @@ -105,7 +105,7 @@ where self.incorporate_history(&push_body).await?; self.synchronize_names(&push_body).await?; - let (next_version, new_blocks) = self.update_gateway_sphere().await?; + let (next_version, new_blocks) = self.update_gateway_sphere(&push_body).await?; // These steps are order-independent let _ = tokio::join!( @@ -121,8 +121,16 @@ })?; for await block in new_blocks { - yield block?; + match block { + Ok(block) => yield block, + Err(error) => { + warn!("Failed to stream final gateway blocks: {}", error); + Err(error)?; + } + } } + + info!("Finished gateway push routine!"); }; Ok(to_car_stream(roots, block_stream)) @@ -216,7 +224,7 @@ where for step in history.into_iter().rev() { let (cid, sphere) = step?; - debug!("Hydrating {}", cid); + trace!("Hydrating {}", cid); sphere.hydrate().await?; } @@ -320,25 +328,24 @@ where /// synchronize the pusher with the latest local history. async fn update_gateway_sphere( &mut self, + push_body: &PushBody, ) -> Result<(Link, impl Stream)>>), PushError> { debug!("Updating the gateway's sphere..."); - // NOTE CDATA: "Previous version" doesn't cover all cases; this needs to be a version given - // in the push body, or else we don't know how far back we actually have to go (e.g., the name - // system may have created a new version in the mean time.
- let previous_version = self.sphere_context.version().await?; + let previous_version = push_body.counterpart_tip.as_ref(); let next_version = SphereCursor::latest(self.sphere_context.clone()) .save(None) .await?; let db = self.sphere_context.sphere_context().await?.db().clone(); - let block_stream = memo_history_stream(db, &next_version, Some(&previous_version), false); + let block_stream = memo_history_stream(db, &next_version, previous_version, false); Ok((next_version, block_stream)) } /// Notify the name system that new names may need to be resolved async fn notify_name_resolver(&self, push_body: &PushBody) -> Result<()> { + debug!("Notifying name system of new link record..."); if let Some(name_record) = &push_body.name_record { if let Err(error) = self.name_system_tx.send(NameSystemJob::Publish { context: self.sphere_context.clone(), @@ -349,9 +356,8 @@ where } } - if let Err(error) = self.name_system_tx.send(NameSystemJob::ResolveSince { + if let Err(error) = self.name_system_tx.send(NameSystemJob::ResolveAll { context: self.sphere_context.clone(), - since: push_body.local_base, }) { warn!("Failed to request name system resolutions: {}", error); }; @@ -361,6 +367,7 @@ where /// Request that new history be syndicated to IPFS async fn notify_ipfs_syndicator(&self, next_version: Link) -> Result<()> { + debug!("Notifying syndication worker of new blocks..."); // TODO(#156): This should not be happening on every push, but rather on // an explicit publish action. Move this to the publish handler when we // have added it to the gateway. diff --git a/rust/noosphere-gateway/src/worker/cleanup.rs b/rust/noosphere-gateway/src/worker/cleanup.rs index 3396bba87..94c29f57b 100644 --- a/rust/noosphere-gateway/src/worker/cleanup.rs +++ b/rust/noosphere-gateway/src/worker/cleanup.rs @@ -71,6 +71,7 @@ where } } +#[instrument(skip(job))] async fn process_job(job: CleanupJob) -> Result<()> where C: HasMutableSphereContext, diff --git a/rust/noosphere-gateway/src/worker/name_system.rs b/rust/noosphere-gateway/src/worker/name_system.rs index a64c4a11b..8ef135ae6 100644 --- a/rust/noosphere-gateway/src/worker/name_system.rs +++ b/rust/noosphere-gateway/src/worker/name_system.rs @@ -130,7 +130,7 @@ where loop { for local_sphere in local_spheres.iter() { if let Err(error) = periodic_publish_record(&tx, local_sphere).await { - error!("Could not publish record: {}", error); + error!("Periodic re-publish of link record failed: {}", error); }; } tokio::time::sleep(Duration::from_secs(PERIODIC_PUBLISH_INTERVAL_SECONDS)).await; @@ -153,7 +153,7 @@ where record, republish: true, }) { - warn!("Failed to request name record publish: {}", error); + warn!("Failed to request link record publish: {}", error); } } _ => { @@ -212,6 +212,7 @@ where Ok(()) } +#[instrument(skip(job, with_client))] async fn process_job( job: NameSystemJob, with_client: &mut TryOrReset, @@ -393,7 +394,7 @@ where // TODO(#260): What if the resolved value is None? Some(record) if last_known_record != next_record => { debug!( - "Gateway adopting petname record for '{}' ({}): {}", + "Gateway adopting petname link record for '{}' ({}): {}", name, identity.did, record ); @@ -404,7 +405,7 @@ where } if let Err(e) = context.set_petname_record(&name, record).await { - warn!("Could not set petname record: {}", e); + warn!("Could not set petname link record: {}", e); continue; } } @@ -419,7 +420,7 @@ where Ok(()) } -/// Attempts to fetch a single name record from the name system. +/// Attempts to fetch a single link record from the name system. 
async fn fetch_record( client: Arc, name: String, diff --git a/rust/noosphere-gateway/src/worker/syndication.rs b/rust/noosphere-gateway/src/worker/syndication.rs index 66c138caf..bb9739bec 100644 --- a/rust/noosphere-gateway/src/worker/syndication.rs +++ b/rust/noosphere-gateway/src/worker/syndication.rs @@ -39,19 +39,17 @@ pub struct SyndicationJob { } /// A [SyndicationCheckpoint] represents the last spot in the history of a -/// sphere that was successfully syndicated to an IPFS node. It records a Bloom -/// filter populated by the CIDs of all blocks that have been syndicated, which -/// gives us a short-cut to determine if a block should be added. +/// sphere that was successfully syndicated to an IPFS node. #[derive(Serialize, Deserialize)] pub struct SyndicationCheckpoint { - pub last_syndicated_version: Option>, + pub last_syndicated_counterpart_version: Option>, pub syndication_epoch: u64, } impl SyndicationCheckpoint { pub fn new() -> Result { Ok(Self { - last_syndicated_version: None, + last_syndicated_counterpart_version: None, syndication_epoch: SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(), }) } @@ -70,14 +68,18 @@ } } -// Re-syndicate every 90 days +// Force a full re-syndication every 90 days const MAX_SYNDICATION_CHECKPOINT_LIFETIME: Duration = Duration::from_secs(60 * 60 * 24 * 90); +// Periodic syndication check every 5 minutes +const PERIODIC_SYNDICATION_INTERVAL_SECONDS: Duration = Duration::from_secs(5 * 60); + /// Start a Tokio task that waits for [SyndicationJob] messages and then /// attempts to syndicate to the configured IPFS RPC. Currently only Kubo IPFS /// backends are supported. pub fn start_ipfs_syndication( ipfs_api: Url, + local_spheres: Vec, ) -> (UnboundedSender>, JoinHandle>) where C: HasMutableSphereContext + 'static, { let (tx, rx) = unbounded_channel(); - (tx, tokio::task::spawn(ipfs_syndication_task(ipfs_api, rx))) + let task = { + let tx = tx.clone(); + tokio::task::spawn(async move { + let (_, syndication_result) = tokio::join!( + periodic_syndication_task(tx, local_spheres), + ipfs_syndication_task(ipfs_api, rx) + ); + syndication_result?; + Ok(()) + }) + }; + + (tx, task) +} + +async fn periodic_syndication_task( + tx: UnboundedSender>, + local_spheres: Vec, +) where + C: HasMutableSphereContext, + S: Storage + 'static, +{ + loop { + for local_sphere in &local_spheres { + if let Err(error) = periodic_syndication(&tx, local_sphere).await { + error!("Periodic syndication of sphere history failed: {}", error); + }; + tokio::time::sleep(Duration::from_secs(5)).await; + } + tokio::time::sleep(PERIODIC_SYNDICATION_INTERVAL_SECONDS).await; + } +} + +async fn periodic_syndication( + tx: &UnboundedSender>, + local_sphere: &C, +) -> Result<()> +where + C: HasMutableSphereContext, + S: Storage + 'static, +{ + let latest_version = local_sphere.version().await?; + + if let Err(error) = tx.send(SyndicationJob { + revision: latest_version, + context: local_sphere.clone(), + }) { + warn!("Failed to request periodic syndication: {}", error); + }; + + Ok(()) } async fn ipfs_syndication_task( @@ -108,6 +160,7 @@ where Ok(()) } +#[instrument(skip(job, kubo_client))] async fn process_job( job: SyndicationJob, kubo_client: Arc, @@ -165,14 +218,23 @@ where None => SyndicationCheckpoint::new()?, }; + if Some(counterpart_revision) == syndication_checkpoint.last_syndicated_counterpart_version + { + warn!("Counterpart version hasn't changed; skipping syndication"); + return Ok(()); + } + (counterpart_revision,
syndication_checkpoint, db) }; let timeline = Timeline::new(&db) .slice( &sphere_revision, - syndication_checkpoint.last_syndicated_version.as_ref(), + syndication_checkpoint + .last_syndicated_counterpart_version + .as_ref(), ) + .exclude_past() .to_chronological() .await?; @@ -204,7 +266,7 @@ where { Ok(_) => { debug!("Syndicated sphere revision {} to IPFS", cid); - syndication_checkpoint.last_syndicated_version = Some(cid); + syndication_checkpoint.last_syndicated_counterpart_version = Some(cid); } Err(error) => warn!( "Failed to pin orphans for revision {} to IPFS: {:?}", @@ -281,7 +343,8 @@ mod tests { let ipfs_url = Url::parse("http://127.0.0.1:5001")?; let local_kubo_client = KuboClient::new(&ipfs_url.clone())?; - let (syndication_tx, _syndication_join_handle) = start_ipfs_syndication::<_, _>(ipfs_url); + let (syndication_tx, _syndication_join_handle) = + start_ipfs_syndication::<_, _>(ipfs_url, vec![user_sphere_context.clone()]); user_sphere_context .write("foo", &ContentType::Text, b"bar".as_ref(), None) diff --git a/rust/noosphere-storage/examples/bench/main.rs b/rust/noosphere-storage/examples/bench/main.rs index 36484568e..bacdd922c 100644 --- a/rust/noosphere-storage/examples/bench/main.rs +++ b/rust/noosphere-storage/examples/bench/main.rs @@ -132,7 +132,7 @@ impl BenchmarkStorage { ))] let (storage, storage_name) = { ( - noosphere_storage::SledStorage::new(&storage_path)?, + noosphere_storage::SledStorage::new(storage_path)?, "SledDbStorage", ) }; diff --git a/rust/noosphere-storage/src/retry.rs b/rust/noosphere-storage/src/retry.rs index c83a41d26..697ff04ba 100644 --- a/rust/noosphere-storage/src/retry.rs +++ b/rust/noosphere-storage/src/retry.rs @@ -7,11 +7,11 @@ use tokio::select; use crate::BlockStore; const DEFAULT_MAX_RETRIES: u32 = 2u32; -const DEFAULT_TIMEOUT: Duration = Duration::from_millis(1500); -const DEFAULT_MINIMUM_DELAY: Duration = Duration::from_secs(1); -const DEFAULT_BACKOFF: Backoff = Backoff::Exponential { - exponent: 2f32, - ceiling: Duration::from_secs(6), +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(1); +const DEFAULT_MINIMUM_DELAY: Duration = Duration::from_millis(100); +const DEFAULT_BACKOFF: Backoff = Backoff::Linear { + increment: Duration::from_secs(1), + ceiling: Duration::from_secs(3), }; /// Backoff configuration used to define how [BlockStoreRetry] should time @@ -127,17 +127,28 @@ where }; }, _ = tokio::time::sleep(next_timeout) => { - warn!("Timed out trying to get {} after {} seconds...", cid, next_timeout.as_secs()); + warn!("Timed out trying to get {} after {} seconds...", cid, next_timeout.as_secs_f32()); } } let spent_window_time = Instant::now() - window_start; - let remaining_window_time = spent_window_time.max(self.minimum_delay); + + // NOTE: Be careful here; `Duration` arithmetic panics on overflow (e.g. when + // a subtraction would produce a negative value), so these operations are effectively fallible.
+ // https://doc.rust-lang.org/std/time/struct.Duration.html#panics-7 + let remaining_window_time = self.attempt_window + - spent_window_time + .max(self.minimum_delay) + .min(self.attempt_window); retry_count += 1; if let Some(backoff) = &self.backoff { next_timeout = backoff.step(next_timeout); + trace!( + "Next timeout will be {} seconds", + next_timeout.as_secs_f32() + ); } tokio::time::sleep(remaining_window_time).await; diff --git a/rust/noosphere/tests/distributed_stress.rs b/rust/noosphere/tests/distributed_stress.rs index 3abd38093..4bfeb3114 100644 --- a/rust/noosphere/tests/distributed_stress.rs +++ b/rust/noosphere/tests/distributed_stress.rs @@ -150,6 +150,7 @@ mod multiplayer { use serde_json::Value; use url::Url; + #[cfg(not(feature = "rocksdb"))] #[tokio::test(flavor = "multi_thread")] async fn orb_can_render_peers_in_the_sphere_address_book() -> Result<()> { initialize_tracing(None);
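The clamped `Duration` arithmetic introduced in `retry.rs` above is subtle enough to be worth illustrating on its own: `Duration` subtraction panics when the result would be negative, so the subtrahend has to be clamped to the attempt window before subtracting. Below is a minimal, self-contained sketch of that pattern; the free function `remaining_window_time` and its parameters are hypothetical stand-ins for the `BlockStoreRetry` fields (`attempt_window`, `minimum_delay`) shown in the diff:

```rust
use std::time::{Duration, Instant};

// A standalone sketch of the clamped window arithmetic from `retry.rs`.
// `attempt_window` and `minimum_delay` mirror the `BlockStoreRetry` fields
// referenced in the diff above; the free function itself is hypothetical.
fn remaining_window_time(
    window_start: Instant,
    attempt_window: Duration,
    minimum_delay: Duration,
) -> Duration {
    let spent_window_time = Instant::now() - window_start;

    // `Duration` subtraction panics if the result would be negative
    // (https://doc.rust-lang.org/std/time/struct.Duration.html#panics-7),
    // so clamp the subtrahend to `attempt_window` before subtracting.
    attempt_window
        - spent_window_time
            .max(minimum_delay)
            .min(attempt_window)
}

fn main() {
    let window_start = Instant::now();
    let delay = remaining_window_time(
        window_start,
        Duration::from_secs(1),
        Duration::from_millis(100),
    );
    // Almost no time has been spent, so the delay is roughly the window
    // minus the minimum delay, and it can never underflow below zero.
    println!("sleeping for {:?}", delay);
    assert!(delay <= Duration::from_secs(1));
}
```

`Duration::saturating_sub` would avoid the panic equally well; the explicit `.min(attempt_window)` clamp is shown here only to mirror the structure of the change above.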