diff --git a/Cargo.lock b/Cargo.lock index b32e561d48c..98d0a971eff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -486,10 +486,11 @@ name = "autonat-example" version = "0.1.0" dependencies = [ "clap", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -679,13 +680,11 @@ version = "0.1.0" dependencies = [ "anyhow", "axum", - "env_logger 0.10.0", "futures", "js-sys", "libp2p", "libp2p-webrtc", "libp2p-webrtc-websys", - "log", "mime_guess", "rand 0.8.5", "rust-embed", @@ -693,6 +692,8 @@ dependencies = [ "tokio-util", "tower", "tower-http", + "tracing", + "tracing-subscriber", "wasm-bindgen", "wasm-bindgen-futures", "wasm-logger", @@ -820,10 +821,11 @@ name = "chat-example" version = "0.1.0" dependencies = [ "async-trait", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1223,12 +1225,13 @@ name = "dcutr-example" version = "0.1.0" dependencies = [ "clap", - "env_logger 0.10.0", "futures", "futures-timer", "libp2p", "log", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1314,9 +1317,10 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1527,10 +1531,11 @@ dependencies = [ "async-std", "clap", "either", - "env_logger 0.10.0", "futures", "libp2p", "serde", + "tracing", + "tracing-subscriber", "void", ] @@ -1943,11 +1948,11 @@ dependencies = [ "env_logger 0.10.0", "futures", "libp2p", - "log", "redis", "serde", "serde_json", "tokio", + "tracing", ] [[package]] @@ -2046,6 +2051,18 @@ dependencies = [ "tokio-rustls 0.23.4", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -2065,9 +2082,10 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2198,7 +2216,6 @@ dependencies = [ "axum", "console_error_panic_hook", "either", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2208,7 +2225,6 @@ dependencies = [ "libp2p-tls", "libp2p-webrtc", "libp2p-webrtc-websys", - "log", "mime_guess", "rand 0.8.5", "redis", @@ -2260,6 +2276,8 @@ dependencies = [ "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2268,10 +2286,11 @@ version = "0.1.0" dependencies = [ "async-trait", "either", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2367,7 +2386,6 @@ dependencies = [ "bytes", "clap", "either", - "env_logger 0.10.0", "futures", "futures-timer", "getrandom 0.2.10", @@ -2409,6 +2427,7 @@ dependencies = [ "rw-stream-sink", "thiserror", "tokio", + "tracing-subscriber", ] [[package]] @@ -2431,7 +2450,6 @@ dependencies = [ "async-std", "async-trait", "asynchronous-codec 0.6.2", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2440,10 +2458,11 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2476,7 +2495,6 @@ dependencies = [ "libp2p-identity", "libp2p-mplex", "libp2p-noise", - "log", "multiaddr", 
"multihash", "multistream-select", @@ -2490,6 +2508,7 @@ dependencies = [ "serde", "smallvec", "thiserror", + "tracing", "unsigned-varint", "void", ] @@ -2502,7 +2521,6 @@ dependencies = [ "asynchronous-codec 0.6.2", "clap", "either", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2519,12 +2537,13 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "lru 0.11.1", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", + "tracing", + "tracing-subscriber", "void", ] @@ -2535,14 +2554,14 @@ dependencies = [ "async-std", "async-std-resolver", "async-trait", - "env_logger 0.10.0", "futures", "libp2p-core", "libp2p-identity", - "log", "parking_lot", "smallvec", "tokio", + "tracing", + "tracing-subscriber", "trust-dns-resolver", ] @@ -2558,12 +2577,12 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "smallvec", "thiserror", + "tracing", ] [[package]] @@ -2576,7 +2595,6 @@ dependencies = [ "byteorder", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-ticker", @@ -2590,7 +2608,6 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -2600,6 +2617,8 @@ dependencies = [ "serde", "sha2 0.10.8", "smallvec", + "tracing", + "tracing-subscriber", "unsigned-varint", "void", ] @@ -2611,7 +2630,6 @@ dependencies = [ "async-std", "asynchronous-codec 0.6.2", "either", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2619,12 +2637,13 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "log", "lru 0.12.0", "quick-protobuf", "quick-protobuf-codec", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "void", ] @@ -2640,7 +2659,6 @@ dependencies = [ "hex-literal", "hkdf", "libsecp256k1", - "log", "multihash", "p256", "quick-protobuf", @@ -2653,6 +2671,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "thiserror", + "tracing", "void", "zeroize", ] @@ -2666,7 +2685,6 @@ dependencies = [ "asynchronous-codec 0.6.2", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-timer", @@ -2678,7 +2696,6 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", @@ -2687,6 +2704,8 @@ dependencies = [ "sha2 0.10.8", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "uint", "unsigned-varint", "void", @@ -2699,7 +2718,6 @@ dependencies = [ "async-io", "async-std", "data-encoding", - "env_logger 0.10.0", "futures", "if-watch", "libp2p-core", @@ -2709,11 +2727,12 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", "smallvec", "socket2 0.5.5", "tokio", + "tracing", + "tracing-subscriber", "trust-dns-proto", "void", ] @@ -2729,10 +2748,10 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-derive", "libp2p-swarm-test", - "log", "memory-stats", "rand 0.8.5", "sysinfo", + "tracing", "void", ] @@ -2761,19 +2780,19 @@ dependencies = [ "asynchronous-codec 0.6.2", "bytes", "criterion", - "env_logger 0.10.0", "futures", "libp2p-core", "libp2p-identity", "libp2p-muxer-test-harness", "libp2p-plaintext", "libp2p-tcp", - "log", "nohash-hasher", "parking_lot", "quickcheck-ext", "rand 0.8.5", "smallvec", + "tracing", + "tracing-subscriber", "unsigned-varint", ] @@ -2785,7 +2804,7 @@ dependencies = [ "futures-timer", "futures_ringbuf", "libp2p-core", - "log", + "tracing", ] 
[[package]] @@ -2795,12 +2814,10 @@ dependencies = [ "asynchronous-codec 0.7.0", "bytes", "curve25519-dalek", - "env_logger 0.10.0", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", "multiaddr", "multihash", "once_cell", @@ -2811,6 +2828,8 @@ dependencies = [ "snow", "static_assertions", "thiserror", + "tracing", + "tracing-subscriber", "x25519-dalek", "zeroize", ] @@ -2821,7 +2840,6 @@ version = "0.3.0" dependencies = [ "anyhow", "clap", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2836,12 +2854,13 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-yamux", - "log", "rand 0.8.5", "serde", "serde_json", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "void", ] @@ -2851,7 +2870,6 @@ version = "0.44.0" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2859,9 +2877,10 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "log", "quickcheck-ext", "rand 0.8.5", + "tracing", + "tracing-subscriber", "void", ] @@ -2871,16 +2890,16 @@ version = "0.41.0" dependencies = [ "asynchronous-codec 0.6.2", "bytes", - "env_logger 0.10.0", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2895,13 +2914,13 @@ dependencies = [ "libp2p-tcp", "libp2p-websocket", "libp2p-yamux", - "log", "pin-project", "quickcheck-ext", "rand 0.8.5", "salsa20", "sha3", "tokio", + "tracing", ] [[package]] @@ -2910,7 +2929,6 @@ version = "0.10.0" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", @@ -2921,7 +2939,6 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-yamux", - "log", "parking_lot", "quickcheck", "quinn", @@ -2931,6 +2948,8 @@ dependencies = [ "socket2 0.5.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2940,7 +2959,6 @@ dependencies = [ "asynchronous-codec 0.6.2", "bytes", "either", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2952,13 +2970,14 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "static_assertions", "thiserror", + "tracing", + "tracing-subscriber", "void", ] @@ -2969,7 +2988,6 @@ dependencies = [ "async-trait", "asynchronous-codec 0.6.2", "bimap", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2983,12 +3001,13 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "void", ] @@ -3000,7 +3019,6 @@ dependencies = [ "async-std", "async-trait", "cbor4ii", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -3013,11 +3031,12 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", "serde", "serde_json", "smallvec", + "tracing", + "tracing-subscriber", "void", ] @@ -3027,17 +3046,17 @@ version = "0.12.3" dependencies = [ "base64 0.21.5", "clap", - "env_logger 0.10.0", "futures", "futures-timer", "hyper", "libp2p", - "log", "prometheus-client", "serde", "serde_derive", "serde_json", "tokio", + "tracing", + "tracing-subscriber", "zeroize", ] @@ -3047,7 +3066,6 @@ version = "0.44.0" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-timer", @@ 
-3062,13 +3080,14 @@ dependencies = [ "libp2p-swarm-derive", "libp2p-swarm-test", "libp2p-yamux", - "log", "multistream-select", "once_cell", "quickcheck-ext", "rand 0.8.5", "smallvec", "tokio", + "tracing", + "tracing-subscriber", "trybuild", "void", "wasm-bindgen-futures", @@ -3097,8 +3116,8 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", + "tracing", ] [[package]] @@ -3107,16 +3126,16 @@ version = "0.41.0" dependencies = [ "async-io", "async-std", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", "libc", "libp2p-core", "libp2p-identity", - "log", "socket2 0.5.5", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -3148,9 +3167,9 @@ dependencies = [ "async-std", "futures", "libp2p-core", - "log", "tempfile", "tokio", + "tracing", ] [[package]] @@ -3162,8 +3181,8 @@ dependencies = [ "igd-next", "libp2p-core", "libp2p-swarm", - "log", "tokio", + "tracing", "void", ] @@ -3173,7 +3192,6 @@ version = "0.6.1-alpha" dependencies = [ "async-trait", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "hex", @@ -3182,7 +3200,6 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "libp2p-webrtc-utils", - "log", "multihash", "quickcheck", "rand 0.8.5", @@ -3193,6 +3210,8 @@ dependencies = [ "tinytemplate", "tokio", "tokio-util", + "tracing", + "tracing-subscriber", "webrtc", ] @@ -3208,7 +3227,6 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-noise", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -3216,6 +3234,7 @@ dependencies = [ "sha2 0.10.8", "thiserror", "tinytemplate", + "tracing", "unsigned-varint", ] @@ -3236,10 +3255,10 @@ dependencies = [ "libp2p-ping", "libp2p-swarm", "libp2p-webrtc-utils", - "log", "send_wrapper 0.6.0", "serde", "thiserror", + "tracing", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -3257,12 +3276,12 @@ dependencies = [ "libp2p-dns", "libp2p-identity", "libp2p-tcp", - "log", "parking_lot", "pin-project-lite", "rcgen", "rw-stream-sink", "soketto", + "tracing", "url", "webpki-roots", ] @@ -3278,10 +3297,10 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "libp2p-yamux", - "log", "parking_lot", "send_wrapper 0.6.0", "thiserror", + "tracing", "wasm-bindgen", "web-sys", ] @@ -3295,12 +3314,12 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-noise", - "log", "multiaddr", "multibase", "multihash", "send_wrapper 0.6.0", "thiserror", + "tracing", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -3314,8 +3333,8 @@ dependencies = [ "futures", "libp2p-core", "libp2p-muxer-test-harness", - "log", "thiserror", + "tracing", "yamux", ] @@ -3499,13 +3518,17 @@ dependencies = [ name = "metrics-example" version = "0.1.0" dependencies = [ - "env_logger 0.10.0", "futures", "hyper", "libp2p", - "log", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry_api", "prometheus-client", "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", ] [[package]] @@ -3600,15 +3623,15 @@ version = "0.13.0" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", "futures_ringbuf", - "log", "pin-project", "quickcheck-ext", "rand 0.8.5", "rw-stream-sink", "smallvec", + "tracing", + "tracing-subscriber", "unsigned-varint", ] @@ -3876,6 +3899,104 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" +dependencies = [ + "opentelemetry_api", + 
"opentelemetry_sdk", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" +dependencies = [ + "async-trait", + "futures-core", + "http", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" +dependencies = [ + "opentelemetry", +] + +[[package]] +name = "opentelemetry_api" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" +dependencies = [ + "futures-channel", + "futures-util", + "indexmap 1.9.3", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "ordered-float", + "percent-encoding", + "rand 0.8.5", + "regex", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "ordered-float" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" +dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -4002,10 +4123,11 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" name = "ping-example" version = "0.1.0" dependencies = [ - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -4180,6 +4302,29 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -4498,9 +4643,10 @@ dependencies = [ "async-std", "async-trait", "clap", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -4509,11 +4655,11 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", - "log", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -5540,6 +5686,16 @@ dependencies = [ "windows-sys", ] +[[package]] +name = 
"tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.1.0" @@ -5582,6 +5738,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.10" @@ -5597,6 +5764,34 @@ dependencies = [ "tracing", ] +[[package]] +name = "tonic" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +dependencies = [ + "async-trait", + "axum", + "base64 0.21.5", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.4.13" @@ -5605,9 +5800,13 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "indexmap 1.9.3", "pin-project", "pin-project-lite", + "rand 0.8.5", + "slab", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -5694,6 +5893,22 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-opentelemetry" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" +dependencies = [ + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "smallvec", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + [[package]] name = "tracing-subscriber" version = "0.3.17" @@ -5919,6 +6134,7 @@ dependencies = [ "futures", "libp2p", "tokio", + "tracing-subscriber", ] [[package]] @@ -5932,6 +6148,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8parse" version = "0.2.1" diff --git a/core/Cargo.toml b/core/Cargo.toml index 4cbfa827af6..b9ebb0ad851 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -17,7 +17,6 @@ futures = { version = "0.3.29", features = ["executor", "thread-pool"] } futures-timer = "3" instant = "0.1.12" libp2p-identity = { workspace = true, features = ["peerid", "ed25519"] } -log = "0.4" multiaddr = { workspace = true } multihash = { workspace = true } multistream-select = { workspace = true } @@ -30,6 +29,7 @@ rw-stream-sink = { workspace = true } serde = { version = "1", optional = true, features = ["derive"] } smallvec = "1.11.1" thiserror = "1.0" +tracing = "0.1.37" unsigned-varint = "0.7" void = "1" diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index 8d3bfdecb79..aa3acfc3231 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -22,7 +22,6 @@ use crate::either::EitherFuture; use crate::transport::{ListenerId, Transport, TransportError, TransportEvent}; use either::Either; use futures::future; -use log::{debug, trace}; use 
multiaddr::Multiaddr; use std::{pin::Pin, task::Context, task::Poll}; @@ -52,16 +51,16 @@ where id: ListenerId, addr: Multiaddr, ) -> Result<(), TransportError> { - trace!( - "Attempting to listen on {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", std::any::type_name::() ); let addr = match self.0.listen_on(id, addr) { Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to listen on {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", std::any::type_name::() ); addr @@ -69,16 +68,16 @@ where res => return res.map_err(|err| err.map(Either::Left)), }; - trace!( - "Attempting to listen on {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", std::any::type_name::() ); let addr = match self.1.listen_on(id, addr) { Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to listen on {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", std::any::type_name::() ); addr @@ -94,17 +93,17 @@ where } fn dial(&mut self, addr: Multiaddr) -> Result> { - trace!( - "Attempting to dial {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", std::any::type_name::() ); let addr = match self.0.dial(addr) { Ok(connec) => return Ok(EitherFuture::First(connec)), Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to dial {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to dial address using {}", std::any::type_name::() ); addr @@ -114,17 +113,17 @@ where } }; - trace!( - "Attempting to dial {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", std::any::type_name::() ); let addr = match self.1.dial(addr) { Ok(connec) => return Ok(EitherFuture::Second(connec)), Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to dial {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to dial address using {}", std::any::type_name::() ); addr diff --git a/core/src/transport/global_only.rs b/core/src/transport/global_only.rs index 4f1fe8ab794..0671b0e9984 100644 --- a/core/src/transport/global_only.rs +++ b/core/src/transport/global_only.rs @@ -22,7 +22,6 @@ use crate::{ multiaddr::{Multiaddr, Protocol}, transport::{ListenerId, TransportError, TransportEvent}, }; -use log::debug; use std::{ pin::Pin, task::{Context, Poll}, @@ -292,20 +291,20 @@ impl crate::Transport for Transport { match addr.iter().next() { Some(Protocol::Ip4(a)) => { if !ipv4_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=%a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial(addr) } Some(Protocol::Ip6(a)) => { if !ipv6_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=%a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial(addr) } _ => { - debug!("Not dialing unsupported Multiaddress {:?}.", addr); + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); Err(TransportError::MultiaddrNotSupported(addr)) } } @@ -318,20 +317,20 @@ impl crate::Transport for Transport { match addr.iter().next() { Some(Protocol::Ip4(a)) => { if !ipv4_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + 
tracing::debug!(ip=?a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial_as_listener(addr) } Some(Protocol::Ip6(a)) => { if !ipv6_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=?a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial_as_listener(addr) } _ => { - debug!("Not dialing unsupported Multiaddress {:?}.", addr); + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); Err(TransportError::MultiaddrNotSupported(addr)) } } diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index aefce686f01..15cb0348cf3 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -21,7 +21,6 @@ use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; use crate::{connection::ConnectedPoint, Negotiated}; use futures::{future::Either, prelude::*}; -use log::debug; use multistream_select::{self, DialerSelectFuture, ListenerSelectFuture}; use std::{mem, pin::Pin, task::Context, task::Poll}; @@ -141,11 +140,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded inbound stream to {name}"); + tracing::trace!(upgrade=%name, "Upgraded inbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade inbound stream to {name}"); + tracing::debug!(upgrade=%name, "Failed to upgrade inbound stream"); return Poll::Ready(Err(UpgradeError::Apply(e))); } } @@ -223,11 +222,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded outbound stream to {name}",); + tracing::trace!(upgrade=%name, "Upgraded outbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade outbound stream to {name}",); + tracing::debug!(upgrade=%name, "Failed to upgrade outbound stream",); return Poll::Ready(Err(UpgradeError::Apply(e))); } } diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml index 712e26f1c44..9a4f2b4df86 100644 --- a/examples/autonat/Cargo.toml +++ b/examples/autonat/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] tokio = { version = "1.33", features = ["full"] } clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index e92be18c279..b071e717731 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -29,6 +29,7 @@ use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; use std::error::Error; use std::net::Ipv4Addr; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -45,7 +46,9 @@ struct Opt { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index 00ccd641da8..d1c0c005861 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ 
-27,6 +27,7 @@ use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{autonat, identify, identity, noise, tcp, yamux}; use std::error::Error; use std::net::Ipv4Addr; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -37,7 +38,9 @@ struct Opt { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index cc2017e6e2c..e18f6d9c531 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -17,10 +17,10 @@ crate-type = ["cdylib"] [dependencies] anyhow = "1.0.72" -env_logger = "0.10" futures = "0.3.29" -log = "0.4" rand = "0.8" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] axum = "0.6.19" diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index 062a7978a01..609d72479c4 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -28,13 +28,13 @@ pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { .build(); let addr = libp2p_endpoint.parse::()?; - log::info!("Dialing {addr}"); + tracing::info!("Dialing {addr}"); swarm.dial(addr)?; loop { match swarm.next().await.unwrap() { SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { - log::error!("Ping failed: {:?}", e); + tracing::error!("Ping failed: {:?}", e); break; } @@ -43,10 +43,10 @@ pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { result: Ok(rtt), .. }) => { - log::info!("Ping successful: RTT: {rtt:?}, from {peer}"); + tracing::info!("Ping successful: RTT: {rtt:?}, from {peer}"); body.append_p(&format!("RTT: {rtt:?} at {}", Date::new_0().to_string()))?; } - evt => log::info!("Swarm event: {:?}", evt), + evt => tracing::info!("Swarm event: {:?}", evt), } } diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs index 4ee86cd1229..97d1ab30250 100644 --- a/examples/browser-webrtc/src/main.rs +++ b/examples/browser-webrtc/src/main.rs @@ -22,10 +22,9 @@ use tower_http::cors::{Any, CorsLayer}; #[tokio::main] async fn main() -> anyhow::Result<()> { - env_logger::builder() - .parse_filters("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") - .parse_default_env() - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") + .try_init(); let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() @@ -56,11 +55,13 @@ async fn main() -> anyhow::Result<()> { .iter() .any(|e| e == Protocol::Ip4(Ipv4Addr::LOCALHOST)) { - log::debug!("Ignoring localhost address to make sure the example works in Firefox"); + tracing::debug!( + "Ignoring localhost address to make sure the example works in Firefox" + ); continue; } - log::info!("Listening on: {address}"); + tracing::info!(%address, "Listening"); break address; } @@ -74,7 +75,7 @@ async fn main() -> anyhow::Result<()> { loop { tokio::select! 
{ swarm_event = swarm.next() => { - log::trace!("Swarm Event: {:?}", swarm_event) + tracing::trace!(?swarm_event) }, _ = tokio::signal::ctrl_c() => { break; @@ -110,7 +111,7 @@ pub(crate) async fn serve(libp2p_transport: Multiaddr) { let addr = SocketAddr::new(listen_addr.into(), 8080); - log::info!("Serving client files at http://{addr}"); + tracing::info!(url=%format!("http://{addr}"), "Serving client files at url"); axum::Server::bind(&addr) .serve(server.into_make_service()) @@ -141,7 +142,7 @@ async fn get_index( /// Serves the static files generated by `wasm-pack`. async fn get_static_file(Path(path): Path) -> Result { - log::debug!("Serving static file: {path}"); + tracing::debug!(file_path=%path, "Serving static file"); let content = StaticFiles::get(&path).ok_or(StatusCode::NOT_FOUND)?.data; let content_type = mime_guess::from_path(path) diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml index 7973b56eb47..b5af806501b 100644 --- a/examples/chat/Cargo.toml +++ b/examples/chat/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] tokio = { version = "1.33", features = ["full"] } async-trait = "0.1" -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs index 0a261873f35..24f8b19d0c4 100644 --- a/examples/chat/src/main.rs +++ b/examples/chat/src/main.rs @@ -27,6 +27,7 @@ use std::error::Error; use std::hash::{Hash, Hasher}; use std::time::Duration; use tokio::{io, io::AsyncBufReadExt, select}; +use tracing_subscriber::EnvFilter; // We create a custom network behaviour that combines Gossipsub and Mdns. 
#[derive(NetworkBehaviour)] @@ -37,6 +38,10 @@ struct MyBehaviour { #[tokio::main] async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() .with_tcp( diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml index 994eed0283e..c6704ab0c03 100644 --- a/examples/dcutr/Cargo.toml +++ b/examples/dcutr/Cargo.toml @@ -10,12 +10,13 @@ release = false [dependencies] clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" futures = "0.3.29" futures-timer = "3.0" libp2p = { path = "../../libp2p", features = [ "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] } log = "0.4" tokio = { version = "1.29", features = ["macros", "net", "rt", "signal"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 6a87e351e02..91beaa02c67 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -28,9 +28,9 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, PeerId, }; -use log::info; use std::error::Error; use std::str::FromStr; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p DCUtR client")] @@ -71,7 +71,9 @@ impl FromStr for Mode { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opts = Opts::parse(); @@ -120,7 +122,7 @@ async fn main() -> Result<(), Box> { event = swarm.next() => { match event.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } event => panic!("{event:?}"), } @@ -149,14 +151,14 @@ async fn main() -> Result<(), Box> { SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Sent { .. })) => { - info!("Told relay its public address."); + tracing::info!("Told relay its public address"); told_relay_observed_addr = true; } SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Received { info: identify::Info { observed_addr, .. }, .. })) => { - info!("Relay told us our observed address: {observed_addr}"); + tracing::info!(address=%observed_addr, "Relay told us our observed address"); learned_observed_addr = true; } event => panic!("{event:?}"), @@ -189,31 +191,31 @@ async fn main() -> Result<(), Box> { loop { match swarm.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } SwarmEvent::Behaviour(BehaviourEvent::RelayClient( relay::client::Event::ReservationReqAccepted { .. }, )) => { assert!(opts.mode == Mode::Listen); - info!("Relay accepted our reservation request."); + tracing::info!("Relay accepted our reservation request"); } SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Dcutr(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Identify(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Ping(_)) => {} SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. 
} => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); } SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { - info!("Outgoing connection error to {:?}: {:?}", peer_id, error); + tracing::info!(peer=?peer_id, "Outgoing connection failed: {error}"); } _ => {} } diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml index 6fdc0ec72b8..dc084b30091 100644 --- a/examples/distributed-key-value-store/Cargo.toml +++ b/examples/distributed-key-value-store/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "async-std", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index b8ecd059fc8..1843520838b 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -31,10 +31,13 @@ use libp2p::{ tcp, yamux, }; use std::error::Error; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // We create a custom network behaviour that combines Kademlia and mDNS. #[derive(NetworkBehaviour)] diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index 22599f5fa38..d0f394fc3df 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -13,9 +13,10 @@ serde = { version = "1.0", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } clap = { version = "4.4.7", features = ["derive"] } either = "1.9" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "async-std", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } void = "1.0.2" [lints] diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs index 4b6d368fc47..ad1a12b3b02 100644 --- a/examples/file-sharing/src/main.rs +++ b/examples/file-sharing/src/main.rs @@ -31,10 +31,13 @@ use libp2p::{core::Multiaddr, multiaddr::Protocol}; use std::error::Error; use std::io::Write; use std::path::PathBuf; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/identify/Cargo.toml b/examples/identify/Cargo.toml index ac36290e7a2..48449636c61 100644 --- a/examples/identify/Cargo.toml +++ b/examples/identify/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio","yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = 
["env-filter"] } [lints] workspace = true diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index d6be9cb9435..3c40addbcf8 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -23,10 +23,13 @@ use futures::StreamExt; use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; use std::error::Error; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_async_std() diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml index f57d158b1e6..537b82c24bf 100644 --- a/examples/ipfs-kad/Cargo.toml +++ b/examples/ipfs-kad/Cargo.toml @@ -16,6 +16,8 @@ env_logger = "0.10" futures = "0.3.29" anyhow = "1.0.75" libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "noise", "tcp", "yamux", "rsa"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index dcb0ef95335..0d11bdd851a 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -28,6 +28,7 @@ use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; use libp2p::{bytes::BufMut, identity, kad, noise, swarm::SwarmEvent, tcp, yamux, PeerId}; +use tracing_subscriber::EnvFilter; const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", @@ -38,7 +39,9 @@ const BOOTNODES: [&str; 4] = [ #[tokio::main] async fn main() -> Result<()> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // Create a random key for ourselves. 
let local_key = identity::Keypair::generate_ed25519(); diff --git a/examples/ipfs-private/Cargo.toml b/examples/ipfs-private/Cargo.toml index 5a8bbd79b63..20cafabe079 100644 --- a/examples/ipfs-private/Cargo.toml +++ b/examples/ipfs-private/Cargo.toml @@ -12,9 +12,10 @@ release = false tokio = { version = "1.33", features = ["rt-multi-thread", "macros", "io-std"] } async-trait = "0.1" either = "1.9" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "dns", "identify", "kad", "macros", "noise", "ping", "pnet", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index 861648fecdd..12bd985cdf0 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -33,6 +33,7 @@ use libp2p::{ }; use std::{env, error::Error, fs, path::Path, str::FromStr}; use tokio::{io, io::AsyncBufReadExt, select}; +use tracing_subscriber::EnvFilter; /// Get the current ipfs repo path, either from the IPFS_PATH environment variable or /// from the default $HOME/.ipfs @@ -87,7 +88,9 @@ fn parse_legacy_multiaddr(text: &str) -> Result> { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ipfs_path = get_ipfs_path(); println!("using IPFS_PATH {ipfs_path:?}"); diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml index 2cb904172d9..c8f74a17ebd 100644 --- a/examples/metrics/Cargo.toml +++ b/examples/metrics/Cargo.toml @@ -9,13 +9,17 @@ license = "MIT" release = false [dependencies] -env_logger = "0.10.0" futures = "0.3.29" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } -libp2p = { path = "../../libp2p", features = ["async-std", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } -log = "0.4.20" -tokio = { version = "1", features = ["rt-multi-thread"] } +libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } +opentelemetry = { version = "0.20.0", features = ["rt-tokio", "metrics"] } +opentelemetry-otlp = { version = "0.13.0", features = ["metrics"]} +opentelemetry_api = "0.20.0" prometheus-client = { workspace = true } +tokio = { version = "1", features = ["full"] } +tracing = "0.1.37" +tracing-opentelemetry = "0.21.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/metrics/README.md b/examples/metrics/README.md index fc73cbd7410..160536985f1 100644 --- a/examples/metrics/README.md +++ b/examples/metrics/README.md @@ -1,6 +1,6 @@ ## Description -The example showcases how to run a p2p network with **libp2p** and collect metrics using `libp2p-metrics`. +The example showcases how to run a p2p network with **libp2p** and collect metrics using `libp2p-metrics` as well as span data via `opentelemetry`. It sets up multiple nodes in the network and measures various metrics, such as `libp2p_ping`, to evaluate the network's performance. ## Usage @@ -34,6 +34,45 @@ To run the example, follow these steps: After executing the command, you should see a long list of metrics printed to the terminal. Make sure to check the `libp2p_ping` metrics, which should have a value greater than zero (`>0`). 
+## Opentelemetry + +To see the span data collected as part of the `Swarm`s activity, start up an opentelemetry collector: + +```sh +docker compose up +``` + +Then, configure tracing to output spans: + +```shell +export RUST_LOG=info,[ConnectionHandler::poll]=trace,[NetworkBehaviour::poll]=trace +``` + +Next, (re)-start the two example for it to connect to the OTEL collector. +Finally, open the Jaeger UI in a browser and explore the spans: http://localhost:16686. + +### Filtering spans + +For a precise documentation, please see the following documentation in tracing: . + +`rust-libp2p` consistently applies spans to the following functions: + +- `ConnectionHandler::poll` implementations +- `NetworkBehaviour::poll` implementations + +The above spans are all called exactly that: `ConnectionHandler::poll` and `NetworkBehaviour::poll`. +You can activate _all_ of them by setting: + +``` +RUST_LOG=[ConnectionHandler::poll]=trace +``` + +If you just wanted to see the spans of the `libp2p_ping` crate, you can filter like this: + +``` +RUST_LOG=libp2p_ping[ConnectionHandler::poll]=trace +``` + ## Conclusion This example demonstrates how to utilize the `libp2p-metrics` crate to collect and analyze metrics in a libp2p network. diff --git a/examples/metrics/docker-compose.yml b/examples/metrics/docker-compose.yml new file mode 100644 index 00000000000..06d8d5becfe --- /dev/null +++ b/examples/metrics/docker-compose.yml @@ -0,0 +1,23 @@ +version: "2" +services: + # Jaeger + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + restart: always + ports: + - "16686:16686" + - "14268" + - "14250" + + # Collector + otel-collector: + image: otel/opentelemetry-collector:0.88.0 + restart: always + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + depends_on: + - jaeger-all-in-one diff --git a/examples/metrics/otel-collector-config.yaml b/examples/metrics/otel-collector-config.yaml new file mode 100644 index 00000000000..8755848cd6e --- /dev/null +++ b/examples/metrics/otel-collector-config.yaml @@ -0,0 +1,25 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + +exporters: + debug: + otlp: + endpoint: jaeger-all-in-one:4317 + tls: + insecure: true + +processors: + batch: + +service: + telemetry: + logs: + level: "debug" + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [debug, otlp] diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs index 46cb7aacb84..8c77d724ea3 100644 --- a/examples/metrics/src/http_service.rs +++ b/examples/metrics/src/http_service.rs @@ -21,7 +21,6 @@ use hyper::http::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; -use log::{error, info}; use prometheus_client::encoding::text::encode; use prometheus_client::registry::Registry; use std::future::Future; @@ -33,18 +32,14 @@ const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;v pub(crate) async fn metrics_server(registry: Registry) -> Result<(), std::io::Error> { // Serve on localhost. - let addr = ([127, 0, 0, 1], 8080).into(); - - // Use the tokio runtime to run the hyper server. 
- let rt = tokio::runtime::Runtime::new()?; - rt.block_on(async { - let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); - info!("Metrics server on http://{}/metrics", server.local_addr()); - if let Err(e) = server.await { - error!("server error: {}", e); - } - Ok(()) - }) + let addr = ([127, 0, 0, 1], 0).into(); + + let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); + tracing::info!(metrics_server=%format!("http://{}/metrics", server.local_addr())); + if let Err(e) = server.await { + tracing::error!("server error: {}", e); + } + Ok(()) } pub(crate) struct MetricService { diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 09d4f7a5941..18db1084d2f 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -20,25 +20,28 @@ #![doc = include_str!("../README.md")] -use env_logger::Env; -use futures::{executor::block_on, StreamExt}; +use futures::StreamExt; use libp2p::core::Multiaddr; use libp2p::metrics::{Metrics, Recorder}; use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{identify, identity, noise, ping, tcp, yamux}; -use log::info; +use opentelemetry::sdk; +use opentelemetry_api::KeyValue; use prometheus_client::registry::Registry; use std::error::Error; -use std::thread; use std::time::Duration; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; +use tracing_subscriber::{EnvFilter, Layer}; mod http_service; -fn main() -> Result<(), Box> { - env_logger::Builder::from_env(Env::default().default_filter_or("info")).init(); +#[tokio::main] +async fn main() -> Result<(), Box> { + setup_tracing()?; let mut swarm = libp2p::SwarmBuilder::with_new_identity() - .with_async_std() + .with_tokio() .with_tcp( tcp::Config::default(), noise::Config::new, @@ -53,31 +56,52 @@ fn main() -> Result<(), Box> { if let Some(addr) = std::env::args().nth(1) { let remote: Multiaddr = addr.parse()?; swarm.dial(remote)?; - info!("Dialed {}", addr) + tracing::info!(address=%addr, "Dialed address") } let mut metric_registry = Registry::default(); let metrics = Metrics::new(&mut metric_registry); - thread::spawn(move || block_on(http_service::metrics_server(metric_registry))); + tokio::spawn(http_service::metrics_server(metric_registry)); - block_on(async { - loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { - info!("{:?}", ping_event); - metrics.record(&ping_event); - } - SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { - info!("{:?}", identify_event); - metrics.record(&identify_event); - } - swarm_event => { - info!("{:?}", swarm_event); - metrics.record(&swarm_event); - } + loop { + match swarm.select_next_some().await { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { + tracing::info!(?ping_event); + metrics.record(&ping_event); + } + SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { + tracing::info!(?identify_event); + metrics.record(&identify_event); + } + swarm_event => { + tracing::info!(?swarm_event); + metrics.record(&swarm_event); } } - }); + } +} + +fn setup_tracing() -> Result<(), Box> { + let tracer = opentelemetry_otlp::new_pipeline() + .tracing() + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) + .with_trace_config( + sdk::trace::Config::default().with_resource(sdk::Resource::new(vec![KeyValue::new( + "service.name", + "libp2p", + )])), + ) + .install_batch(opentelemetry::runtime::Tokio)?; + + tracing_subscriber::registry() + 
.with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env())) + .with( + tracing_opentelemetry::layer() + .with_tracer(tracer) + .with_filter(EnvFilter::from_default_env()), + ) + .try_init()?; + Ok(()) } diff --git a/examples/ping/Cargo.toml b/examples/ping/Cargo.toml index dd32c3744d8..58cee54409e 100644 --- a/examples/ping/Cargo.toml +++ b/examples/ping/Cargo.toml @@ -9,10 +9,11 @@ license = "MIT" release = false [dependencies] -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["noise", "ping", "tcp", "tokio", "yamux"] } tokio = { version = "1.33.0", features = ["full"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ping/src/main.rs b/examples/ping/src/main.rs index d89415132e5..911b0384f89 100644 --- a/examples/ping/src/main.rs +++ b/examples/ping/src/main.rs @@ -23,9 +23,14 @@ use futures::prelude::*; use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() .with_tcp( diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 5da0d55b1d4..7017bfdad64 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -12,9 +12,10 @@ release = false clap = { version = "4.4.7", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.29" +futures = "0.3.2" libp2p = { path = "../../libp2p", features = [ "async-std", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 2f86f9b938e..bf5817454f8 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -33,9 +33,12 @@ use libp2p::{ }; use std::error::Error; use std::net::{Ipv4Addr, Ipv6Addr}; +use tracing_subscriber::EnvFilter; fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/rendezvous/Cargo.toml b/examples/rendezvous/Cargo.toml index 0c2c32f0edb..f20e5f519ae 100644 --- a/examples/rendezvous/Cargo.toml +++ b/examples/rendezvous/Cargo.toml @@ -11,11 +11,11 @@ release = false [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] } -log = "0.4" tokio = { version = "1.33", features = ["rt-multi-thread", "macros", "time"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index 42a5a20b6ad..edd3d10a0ce 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -27,12 +27,15 @@ use libp2p::{ }; use std::error::Error; use 
std::time::Duration; +use tracing_subscriber::EnvFilter; const NAMESPACE: &str = "rendezvous"; #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" @@ -62,7 +65,7 @@ async fn main() -> Result<(), Box> { tokio::select! { event = swarm.select_next_some() => match event { SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == rendezvous_point => { - log::info!( + tracing::info!( "Connected to rendezvous point, discovering nodes in '{}' namespace ...", NAMESPACE ); @@ -84,7 +87,7 @@ async fn main() -> Result<(), Box> { for registration in registrations { for address in registration.record.addresses() { let peer = registration.record.peer_id(); - log::info!("Discovered peer {} at {}", peer, address); + tracing::info!(%peer, %address, "Discovered peer"); let p2p_suffix = Protocol::P2p(peer); let address_with_p2p = @@ -103,10 +106,10 @@ async fn main() -> Result<(), Box> { result: Ok(rtt), .. })) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!(%peer, "Ping is {}ms", rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } }, _ = discover_tick.tick(), if cookie.is_some() => diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index be644dbb9f8..1d545592829 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -25,10 +25,13 @@ use libp2p::{ tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" @@ -62,14 +65,14 @@ async fn main() { while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Identify(identify::Event::Received { @@ -80,7 +83,7 @@ async fn main() { rendezvous_point, None, ) { - log::error!("Failed to register: {error}"); + tracing::error!("Failed to register: {error}"); return; } } @@ -91,7 +94,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -105,7 +108,7 @@ async fn main() { error, }, )) => { - log::error!( + tracing::error!( "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", rendezvous_node, namespace, @@ -118,10 +121,10 @@ async fn main() { result: Ok(rtt), .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index 928dcdd1625..bd848238d4a 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -25,10 +25,13 @@ use libp2p::{ tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" @@ -61,14 +64,14 @@ async fn main() { while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == rendezvous_point => { if let Err(error) = swarm.behaviour_mut().rendezvous.register( @@ -76,10 +79,10 @@ async fn main() { rendezvous_point, None, ) { - log::error!("Failed to register: {error}"); + tracing::error!("Failed to register: {error}"); return; } - log::info!("Connection established with rendezvous point {}", peer_id); + tracing::info!("Connection established with rendezvous point {}", peer_id); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( @@ -89,7 +92,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -103,7 +106,7 @@ async fn main() { error, }, )) => { - log::error!( + tracing::error!( "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", rendezvous_node, namespace, @@ -116,10 +119,10 @@ async fn main() { result: Ok(rtt), .. })) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index a1b811f4f11..a15bc1ca2d3 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -28,10 +28,13 @@ use libp2p::{ }; use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // Results in PeerID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN which is // used as the rendezvous point by the other peer examples. @@ -60,15 +63,15 @@ async fn main() -> Result<(), Box> { while let Some(event) = swarm.next().await { match event { SwarmEvent::ConnectionEstablished { peer_id, .. 
} => { - log::info!("Connected to {}", peer_id); + tracing::info!("Connected to {}", peer_id); } SwarmEvent::ConnectionClosed { peer_id, .. } => { - log::info!("Disconnected from {}", peer_id); + tracing::info!("Disconnected from {}", peer_id); } SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( rendezvous::server::Event::PeerRegistered { peer, registration }, )) => { - log::info!( + tracing::info!( "Peer {} registered for namespace '{}'", peer, registration.namespace @@ -80,14 +83,14 @@ async fn main() -> Result<(), Box> { registrations, }, )) => { - log::info!( + tracing::info!( "Served peer {} with {} registrations", enquirer, registrations.len() ); } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/upnp/Cargo.toml b/examples/upnp/Cargo.toml index 02110c33840..940f3dff65f 100644 --- a/examples/upnp/Cargo.toml +++ b/examples/upnp/Cargo.toml @@ -12,6 +12,7 @@ release = false tokio = { version = "1", features = ["rt-multi-thread", "macros"] } futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["tokio", "dns", "macros", "noise", "ping", "tcp", "yamux", "upnp"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs index c602a687db7..fd0764990d1 100644 --- a/examples/upnp/src/main.rs +++ b/examples/upnp/src/main.rs @@ -23,9 +23,14 @@ use futures::prelude::*; use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr}; use std::error::Error; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() .with_tcp( diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml index 4d067117260..14e5793f141 100644 --- a/hole-punching-tests/Cargo.toml +++ b/hole-punching-tests/Cargo.toml @@ -10,7 +10,7 @@ anyhow = "1" env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../libp2p", features = ["tokio", "dcutr", "identify", "macros", "noise", "ping", "relay", "tcp", "yamux", "quic"] } -log = "0.4" +tracing = "0.1.37" redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } tokio = { version = "1.29.1", features = ["full"] } serde = { version = "1.0.190", features = ["derive"] } diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs index 72b81f776ad..4f81cd65480 100644 --- a/hole-punching-tests/src/main.rs +++ b/hole-punching-tests/src/main.rs @@ -104,7 +104,7 @@ async fn main() -> Result<()> { _, _, ) => { - log::info!("Relay accepted our reservation request."); + tracing::info!("Relay accepted our reservation request."); redis .push(LISTEN_CLIENT_PEER_ID, swarm.local_peer_id()) @@ -118,7 +118,7 @@ async fn main() -> Result<()> { _, _, ) => { - log::info!("Successfully hole-punched to {remote_peer_id}"); + tracing::info!("Successfully hole-punched to {remote_peer_id}"); hole_punched_peer_connection = Some(connection_id); } @@ -144,7 +144,7 @@ async fn main() -> Result<()> { _, _, ) => { - log::info!("Failed to hole-punched to {remote_peer_id}"); + tracing::info!("Failed to hole-punched to {remote_peer_id}"); return Err(anyhow::Error::new(error)); } ( @@ -225,7 +225,7 @@ async fn client_listen_on_transport( listen_addresses += 1; } - log::info!("Listening on {address}"); + tracing::info!("Listening on {address}"); } } Ok(()) @@ 
-292,7 +292,7 @@ impl RedisClient { async fn push(&mut self, key: &str, value: impl ToString) -> Result<()> { let value = value.to_string(); - log::debug!("Pushing {key}={value} to redis"); + tracing::debug!("Pushing {key}={value} to redis"); self.inner.rpush(key, value).await?; @@ -304,7 +304,7 @@ impl RedisClient { V: FromStr + fmt::Display, V::Err: std::error::Error + Send + Sync + 'static, { - log::debug!("Fetching {key} from redis"); + tracing::debug!("Fetching {key} from redis"); let value = self .inner @@ -314,7 +314,7 @@ impl RedisClient { .with_context(|| format!("Failed to get value for {key} from redis"))? .parse()?; - log::debug!("{key}={value}"); + tracing::debug!("{key}={value}"); Ok(value) } diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 6e25699d9ed..e09e8b0e2b2 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -17,7 +17,7 @@ bs58 = { version = "0.5.0", optional = true } ed25519-dalek = { version = "2.0", optional = true } hkdf = { version = "0.12.3", optional = true } libsecp256k1 = { version = "0.7.0", optional = true } -log = "0.4" +tracing = "0.1.37" multihash = { version = "0.19.1", optional = true } p256 = { version = "0.13", default-features = false, features = [ "ecdsa", "std", "pem"], optional = true } quick-protobuf = "0.8.1" diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 41e2181d2a9..bdfb68c0091 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -672,7 +672,7 @@ impl TryFrom for PublicKey { )?), #[cfg(not(feature = "ed25519"))] proto::KeyType::Ed25519 => { - log::debug!("support for ed25519 was disabled at compile-time"); + tracing::debug!("support for ed25519 was disabled at compile-time"); Err(DecodingError::missing_feature("ed25519")) } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] @@ -685,7 +685,7 @@ impl TryFrom for PublicKey { } #[cfg(any(not(feature = "rsa"), target_arch = "wasm32"))] proto::KeyType::RSA => { - log::debug!("support for RSA was disabled at compile-time"); + tracing::debug!("support for RSA was disabled at compile-time"); Err(DecodingError::missing_feature("rsa")) } #[cfg(feature = "secp256k1")] @@ -695,7 +695,7 @@ impl TryFrom for PublicKey { })?), #[cfg(not(feature = "secp256k1"))] proto::KeyType::Secp256k1 => { - log::debug!("support for secp256k1 was disabled at compile-time"); + tracing::debug!("support for secp256k1 was disabled at compile-time"); Err(DecodingError::missing_feature("secp256k1")) } #[cfg(feature = "ecdsa")] @@ -706,7 +706,7 @@ impl TryFrom for PublicKey { )?), #[cfg(not(feature = "ecdsa"))] proto::KeyType::ECDSA => { - log::debug!("support for ECDSA was disabled at compile-time"); + tracing::debug!("support for ECDSA was disabled at compile-time"); Err(DecodingError::missing_feature("ecdsa")) } } diff --git a/interop-tests/Cargo.toml b/interop-tests/Cargo.toml index e49562a759a..3caad98dfa2 100644 --- a/interop-tests/Cargo.toml +++ b/interop-tests/Cargo.toml @@ -14,11 +14,11 @@ crate-type = ["cdylib", "rlib"] [dependencies] anyhow = "1" either = "1.9.0" -env_logger = "0.10.0" futures = "0.3.29" -log = "0.4" -serde = { version = "1", features = ["derive"] } rand = "0.8.5" +serde = { version = "1", features = ["derive"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] axum = "0.6" @@ -36,7 +36,7 @@ serde_json = "1" thirtyfour = "=0.32.0-rc.8" # https://github.com/stevepryde/thirtyfour/issues/169 tokio = { version = "1.33.0", features = ["full"] } 
tower-http = { version = "0.4", features = ["cors", "fs", "trace"] } -tracing = "0.1" +tracing = "0.1.37" tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs index d90af53abb1..52000f90a86 100644 --- a/interop-tests/src/arch.rs +++ b/interop-tests/src/arch.rs @@ -11,7 +11,6 @@ pub(crate) mod native { use std::time::Duration; use anyhow::{bail, Context, Result}; - use env_logger::{Env, Target}; use futures::future::BoxFuture; use futures::FutureExt; use libp2p::identity::Keypair; @@ -20,15 +19,16 @@ pub(crate) mod native { use libp2p_mplex as mplex; use libp2p_webrtc as webrtc; use redis::AsyncCommands; + use tracing_subscriber::EnvFilter; use crate::{Muxer, SecProtocol, Transport}; pub(crate) type Instant = std::time::Instant; pub(crate) fn init_logger() { - env_logger::Builder::from_env(Env::default().default_filter_or("info")) - .target(Target::Stdout) - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); } pub(crate) fn sleep(duration: Duration) -> BoxFuture<'static, ()> { diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs index a228b913930..8269ff064ad 100644 --- a/interop-tests/src/bin/wasm_ping.rs +++ b/interop-tests/src/bin/wasm_ping.rs @@ -15,7 +15,6 @@ use tokio::process::Child; use tokio::sync::mpsc; use tower_http::cors::CorsLayer; use tower_http::trace::TraceLayer; -use tracing::{error, warn}; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; use interop_tests::{BlpopRequest, Report}; @@ -144,16 +143,17 @@ async fn redis_blpop( ) -> Result>, StatusCode> { let client = state.0.redis_client; let mut conn = client.get_async_connection().await.map_err(|e| { - warn!("Failed to connect to redis: {e}"); + tracing::warn!("Failed to connect to redis: {e}"); StatusCode::INTERNAL_SERVER_ERROR })?; let res = conn .blpop(&request.key, request.timeout as usize) .await .map_err(|e| { - warn!( - "Failed to get list elem {} within timeout {}: {e}", - request.key, request.timeout + tracing::warn!( + key=%request.key, + timeout=%request.timeout, + "Failed to get list elem key within timeout: {e}" ); StatusCode::INTERNAL_SERVER_ERROR })?; @@ -167,7 +167,7 @@ async fn post_results( request: Json>, ) -> Result<(), StatusCode> { state.0.results_tx.send(request.0).await.map_err(|_| { - error!("Failed to send results"); + tracing::error!("Failed to send results"); StatusCode::INTERNAL_SERVER_ERROR }) } diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs index d48fc289d4b..0154bec51a4 100644 --- a/interop-tests/src/lib.rs +++ b/interop-tests/src/lib.rs @@ -47,7 +47,7 @@ pub async fn run_test( let (mut swarm, local_addr) = build_swarm(ip, transport, sec_protocol, muxer, build_behaviour).await?; - log::info!("Running ping test: {}", swarm.local_peer_id()); + tracing::info!(local_peer=%swarm.local_peer_id(), "Running ping test"); // See https://github.com/libp2p/rust-libp2p/issues/4071. #[cfg(not(target_arch = "wasm32"))] @@ -74,7 +74,7 @@ pub async fn run_test( let handshake_start = Instant::now(); swarm.dial(other.parse::()?)?; - log::info!("Test instance, dialing multiaddress on: {}.", other); + tracing::info!(listener=%other, "Test instance, dialing multiaddress"); let rtt = loop { if let Some(SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { @@ -82,7 +82,7 @@ pub async fn run_test( .. 
}))) = swarm.next().await { - log::info!("Ping successful: {rtt:?}"); + tracing::info!(?rtt, "Ping successful"); break rtt.as_micros() as f32 / 1000.; } }; @@ -101,9 +101,9 @@ pub async fn run_test( Some(id) => id, }; - log::info!( - "Test instance, listening for incoming connections on: {:?}.", - local_addr + tracing::info!( + address=%local_addr, + "Test instance, listening for incoming connections on address" ); loop { @@ -129,7 +129,7 @@ pub async fn run_test( loop { let event = swarm.next().await.unwrap(); - log::debug!("{event:?}"); + tracing::debug!("{event:?}"); } } .boxed(), @@ -164,7 +164,7 @@ pub async fn run_test_wasm( muxer, ) .await; - log::info!("Sending test result: {result:?}"); + tracing::info!(?result, "Sending test result"); reqwest::Client::new() .post(&format!("http://{}/results", base_url)) .json(&result.map_err(|e| e.to_string())) diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 8ea8ce19b26..a47f8bfdaa1 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -139,14 +139,13 @@ libp2p-websocket = { workspace = true, optional = true } [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } async-trait = "0.1" -either = "1.8.0" -env_logger = "0.10.0" clap = { version = "4.1.6", features = ["derive"] } tokio = { version = "1.15", features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread"] } libp2p-mplex = { workspace = true } libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["tokio"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index b92d7b51c8a..db515595bfc 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -56,9 +56,9 @@ //! //! [dependencies] //! libp2p = { version = "0.52", features = ["tcp", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } -//! futures = "0.3" -//! env_logger = "0.10.0" -//! async-std = { version = "1.12", features = ["attributes"] } +//! futures = "0.3.21" +//! async-std = { version = "1.12.0", features = ["attributes"] } +//! tracing-subscriber = { version = "0.3", features = ["env-filter"] } //! ``` //! //! ## Network identity @@ -72,10 +72,11 @@ //! //! ```rust //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity(); //! @@ -96,10 +97,11 @@ //! ```rust //! use libp2p::{identity, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -138,11 +140,12 @@ //! ```rust //! use libp2p::swarm::NetworkBehaviour; //! use libp2p::{identity, ping, PeerId}; +//! use tracing_subscriber::EnvFilter; //! use std::error::Error; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! 
let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -168,10 +171,11 @@ //! use libp2p::swarm::NetworkBehaviour; //! use libp2p::{identity, ping, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -202,10 +206,11 @@ //! use libp2p::{identity, ping, PeerId}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -252,9 +257,12 @@ //! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( @@ -294,9 +302,12 @@ //! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! 
.with_tcp( diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml index bf2a0384570..ae6bb386373 100644 --- a/misc/memory-connection-limits/Cargo.toml +++ b/misc/memory-connection-limits/Cargo.toml @@ -14,8 +14,8 @@ memory-stats = { version = "1", features = ["always_use_statm"] } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } -log = "0.4" sysinfo = "0.29" +tracing = "0.1.37" void = "1" [dev-dependencies] diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index 01ff04552e7..5bc5f1068a3 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -127,7 +127,7 @@ impl Behaviour { let stats = match memory_stats::memory_stats() { Some(stats) => stats, None => { - log::warn!("Failed to retrieve process memory stats"); + tracing::warn!("Failed to retrieve process memory stats"); return; } }; diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index 6bd072070e7..e33478c1a08 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -13,18 +13,18 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1" futures = "0.3" -log = "0.4" +tracing = "0.1.37" pin-project = "1.1.3" smallvec = "1.11.1" unsigned-varint = "0.7" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10" futures_ringbuf = "0.4.0" quickcheck = { workspace = true } rand = "0.8" rw-stream-sink = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index 8caa7b0e0a2..83bb4909041 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -131,7 +131,7 @@ where if let Err(err) = Pin::new(&mut io).start_send(Message::Protocol(p.clone())) { return Poll::Ready(Err(From::from(err))); } - log::debug!("Dialer: Proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Proposed protocol"); if this.protocols.peek().is_some() { *this.state = State::FlushProtocol { io, protocol } @@ -143,7 +143,7 @@ where // the dialer supports for this negotiation. Notably, // the dialer expects a regular `V1` response. 
Version::V1Lazy => { - log::debug!("Dialer: Expecting proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Expecting proposed protocol"); let hl = HeaderLine::from(Version::V1Lazy); let io = Negotiated::expecting(io.into_reader(), p, Some(hl)); return Poll::Ready(Ok((protocol, io))); @@ -180,14 +180,14 @@ where *this.state = State::AwaitProtocol { io, protocol }; } Message::Protocol(ref p) if p.as_ref() == protocol.as_ref() => { - log::debug!("Dialer: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Received confirmation for protocol"); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); } Message::NotAvailable => { - log::debug!( - "Dialer: Received rejection of protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Dialer: Received rejection of protocol" ); let protocol = this.protocols.next().ok_or(NegotiationError::Failed)?; *this.state = State::SendProtocol { io, protocol } @@ -208,9 +208,10 @@ mod tests { use crate::listener_select_proto; use async_std::future::timeout; use async_std::net::{TcpListener, TcpStream}; - use log::info; use quickcheck::{Arbitrary, Gen, GenRange}; use std::time::Duration; + use tracing::metadata::LevelFilter; + use tracing_subscriber::EnvFilter; #[test] fn select_proto_basic() { @@ -266,7 +267,13 @@ mod tests { ListenerProtos(listen_protos): ListenerProtos, DialPayload(dial_payload): DialPayload, ) { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::DEBUG.into()) + .from_env_lossy(), + ) + .try_init(); async_std::task::block_on(async move { let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); @@ -312,7 +319,7 @@ mod tests { // got confirmation of the last proposed protocol, when `V1Lazy` // is used. - info!("Writing early data"); + tracing::info!("Writing early data"); io.write_all(&dial_payload).await.unwrap(); match io.complete().await { @@ -324,7 +331,7 @@ mod tests { server.await; client.await; - info!("---------------------------------------") + tracing::info!("---------------------------------------") }); } diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs index cff2f4abc39..6515d00c717 100644 --- a/misc/multistream-select/src/length_delimited.rs +++ b/misc/multistream-select/src/length_delimited.rs @@ -170,7 +170,7 @@ where if (buf[*pos - 1] & 0x80) == 0 { // MSB is not set, indicating the end of the length prefix. let (len, _) = unsigned_varint::decode::u16(buf).map_err(|e| { - log::debug!("invalid length prefix: {}", e); + tracing::debug!("invalid length prefix: {e}"); io::Error::new(io::ErrorKind::InvalidData, "invalid length prefix") })?; diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs index d0037a78619..21c507096e2 100644 --- a/misc/multistream-select/src/listener_select.rs +++ b/misc/multistream-select/src/listener_select.rs @@ -52,7 +52,7 @@ where .filter_map(|n| match Protocol::try_from(n.as_ref()) { Ok(p) => Some((n, p)), Err(e) => { - log::warn!( + tracing::warn!( "Listener: Ignoring invalid protocol: {} due to {}", n.as_ref(), e @@ -186,7 +186,7 @@ where // the dialer also raises `NegotiationError::Failed` when finally // reading the `N/A` response. 
if let ProtocolError::InvalidMessage = &err { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with invalid \ message after protocol rejection." ); @@ -194,7 +194,7 @@ where } if let ProtocolError::IoError(e) = &err { if e.kind() == std::io::ErrorKind::UnexpectedEof { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with EOF \ after protocol rejection." ); @@ -228,10 +228,10 @@ where }); let message = if protocol.is_some() { - log::debug!("Listener: confirming protocol: {}", p); + tracing::debug!(protocol=%p, "Listener: confirming protocol"); Message::Protocol(p.clone()) } else { - log::debug!("Listener: rejecting protocol: {}", p.as_ref()); + tracing::debug!(protocol=%p.as_ref(), "Listener: rejecting protocol"); Message::NotAvailable }; @@ -287,9 +287,9 @@ where // Otherwise expect to receive another message. match protocol { Some(protocol) => { - log::debug!( - "Listener: sent confirmed protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Listener: sent confirmed protocol" ); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index 941b60765ca..a24014a4f5f 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -171,7 +171,7 @@ impl Negotiated { if let Message::Protocol(p) = &msg { if p.as_ref() == protocol.as_ref() { - log::debug!("Negotiated: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Negotiated: Received confirmation for protocol"); *this.state = State::Completed { io: io.into_inner(), }; @@ -317,7 +317,7 @@ where StateProj::Expecting { io, .. } => { let close_poll = io.poll_close(cx); if let Poll::Ready(Ok(())) = close_poll { - log::debug!("Stream closed. Confirmation from remote for optimstic protocol negotiation still pending.") + tracing::debug!("Stream closed. 
Confirmation from remote for optimistic protocol negotiation still pending") } close_poll } diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index be2f3122da0..d5c2bfa773a 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -403,7 +403,7 @@ where return Poll::Ready(None); }; - log::trace!("Received message: {:?}", msg); + tracing::trace!(message=?msg, "Received message"); Poll::Ready(Some(Ok(msg))) } diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index efaa43a8658..e69c4f0b5cd 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -13,17 +13,17 @@ license = "MIT" [dependencies] base64 = "0.21" clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" futures = "0.3" futures-timer = "3" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic"] } -log = "0.4" prometheus-client = { workspace = true } serde = "1.0.190" serde_derive = "1.0.125" serde_json = "1.0" tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } zeroize = "1" [lints] diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs index 1f5ebaff593..7905933fbf5 100644 --- a/misc/server/src/http_service.rs +++ b/misc/server/src/http_service.rs @@ -21,7 +21,6 @@ use hyper::http::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; -use log::info; use prometheus_client::encoding::text::encode; use prometheus_client::registry::Registry; use std::future::Future; @@ -38,11 +37,7 @@ pub(crate) async fn metrics_server( let addr = ([0, 0, 0, 0], 8888).into(); let server = Server::bind(&addr).serve(MakeMetricService::new(registry, metrics_path.clone())); - info!( - "Metrics server on http://{}{}", - server.local_addr(), - metrics_path - ); + tracing::info!(metrics_server=%format!("http://{}{}", server.local_addr(), metrics_path)); server.await?; Ok(()) } diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs index 0573aae5c6f..d42675ec5f9 100644 --- a/misc/server/src/main.rs +++ b/misc/server/src/main.rs @@ -9,7 +9,6 @@ use libp2p::metrics::{Metrics, Recorder}; use libp2p::swarm::SwarmEvent; use libp2p::tcp; use libp2p::{identify, noise, yamux}; -use log::{debug, info, warn}; use prometheus_client::metrics::info::Info; use prometheus_client::registry::Registry; use std::error::Error; @@ -17,6 +16,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::task::Poll; use std::time::Duration; +use tracing_subscriber::EnvFilter; use zeroize::Zeroizing; mod behaviour; @@ -47,7 +47,9 @@ struct Opts { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opts::parse(); @@ -84,25 +86,25 @@ async fn main() -> Result<(), Box> { .build(); if config.addresses.swarm.is_empty() { - warn!("No listen addresses configured."); + tracing::warn!("No listen addresses configured"); } for address in &config.addresses.swarm { match swarm.listen_on(address.clone()) { Ok(_) => {} Err(e @ libp2p::TransportError::MultiaddrNotSupported(_)) => { - warn!("Failed to listen on {address}, continuing anyways, {e}") + tracing::warn!(%address, "Failed to listen on 
address, continuing anyways, {e}") } Err(e) => return Err(e.into()), } } if config.addresses.append_announce.is_empty() { - warn!("No external addresses configured."); + tracing::warn!("No external addresses configured"); } for address in &config.addresses.append_announce { swarm.add_external_address(address.clone()) } - info!( + tracing::info!( "External addresses: {:?}", swarm.external_addresses().collect::>() ); @@ -117,7 +119,7 @@ async fn main() -> Result<(), Box> { ); tokio::spawn(async move { if let Err(e) = http_service::metrics_server(metric_registry, opt.metrics_path).await { - log::error!("Metrics server failed: {e}"); + tracing::error!("Metrics server failed: {e}"); } }); @@ -137,7 +139,7 @@ async fn main() -> Result<(), Box> { metrics.record(&event); match event { SwarmEvent::Behaviour(behaviour::BehaviourEvent::Identify(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); metrics.record(&e); if let identify::Event::Received { @@ -162,24 +164,24 @@ async fn main() -> Result<(), Box> { } } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Ping(e)) => { - debug!("{:?}", e); + tracing::debug!("{:?}", e); metrics.record(&e); } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Kademlia(e)) => { - debug!("{:?}", e); + tracing::debug!("{:?}", e); metrics.record(&e); } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Relay(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); metrics.record(&e) } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Autonat(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); // TODO: Add metric recording for `NatStatus`. // metrics.record(&e) } SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {address:?}"); + tracing::info!(%address, "Listening on address"); } _ => {} } diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml index 4401ef9bc44..868ab8db8fb 100644 --- a/misc/webrtc-utils/Cargo.toml +++ b/misc/webrtc-utils/Cargo.toml @@ -11,13 +11,13 @@ version = "0.1.0" publish = true [dependencies] +asynchronous-codec = "0.6" bytes = "1" futures = "0.3" hex = "0.4" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } -log = "0.4.19" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" @@ -25,7 +25,7 @@ serde = { version = "1.0", features = ["derive"] } sha2 = "0.10.8" thiserror = "1" tinytemplate = "1.2" -asynchronous-codec = "0.6" +tracing = "0.1.37" [dev-dependencies] hex-literal = "0.4" diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs index 7c4facaf27e..0796548f449 100644 --- a/misc/webrtc-utils/src/sdp.rs +++ b/misc/webrtc-utils/src/sdp.rs @@ -34,7 +34,7 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: & client_ufrag, ); - log::trace!("Created SDP answer: {answer}"); + tracing::trace!(%answer, "Created SDP answer"); answer } diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs index b638ea84b09..9745e3d4364 100644 --- a/misc/webrtc-utils/src/stream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -79,7 +79,7 @@ where return Poll::Ready(Ok(())); } Poll::Ready(Err(Canceled)) => { - log::info!("Stream dropped without graceful close, sending Reset"); + tracing::info!("Stream dropped without graceful close, sending Reset"); *state = State::SendingReset { stream }; continue; } diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index c4286e16169..c38b11dca9e 100644 --- 
a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -16,23 +16,23 @@ futures = "0.3.29" asynchronous-codec = "0.6" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" nohash-hasher = "0.2" parking_lot = "0.12" rand = "0.8" smallvec = "1.11.1" +tracing = "0.1.37" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } criterion = "0.5" -env_logger = "0.10" futures = "0.3" libp2p-identity = { workspace = true, features = ["rand"] } libp2p-muxer-test-harness = { path = "../test-harness" } libp2p-plaintext = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[bench]] name = "split_send_size" diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index 86f84ceab2c..9a9814d2f2a 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -35,6 +35,7 @@ use libp2p_mplex as mplex; use libp2p_plaintext as plaintext; use std::pin::Pin; use std::time::Duration; +use tracing_subscriber::EnvFilter; type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>; @@ -51,7 +52,9 @@ const BENCH_SIZES: [usize; 8] = [ ]; fn prepare(c: &mut Criterion) { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let payload: Vec = vec![1; 1024 * 1024]; diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 8002ad383d6..0dd8b9ea6a9 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -24,7 +24,6 @@ use asynchronous_codec::Framed; use bytes::Bytes; use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef}; use futures::{prelude::*, ready, stream::Fuse}; -use log::{debug, trace}; use nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; @@ -117,7 +116,7 @@ where /// Creates a new multiplexed I/O stream. pub(crate) fn new(io: C, config: MplexConfig) -> Self { let id = ConnectionId(rand::random()); - debug!("New multiplexed connection: {}", id); + tracing::debug!(connection=%id, "New multiplexed connection"); Multiplexed { id, config, @@ -254,9 +253,11 @@ where // Check the stream limits. if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams reached ({})", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + total_substreams=%self.substreams.len(), + max_substreams=%self.config.max_substreams, + "Maximum number of substreams reached" ); self.notifier_open.register(cx.waker()); return Poll::Pending; @@ -276,11 +277,11 @@ where buf: Default::default(), }, ); - debug!( - "{}: New outbound substream: {} (total {})", - self.id, - stream_id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%stream_id, + total_substreams=%self.substreams.len(), + "New outbound substream" ); // The flush is delayed and the `Open` frame may be sent // together with other frames in the same transport packet. 
@@ -348,7 +349,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending close for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending close for substream" + ); self.pending_frames .push_front(Frame::Close { stream_id: id }); } @@ -356,7 +361,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending reset for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending reset for substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); } @@ -476,11 +485,11 @@ where frame @ Frame::Open { .. } => { if let Some(id) = self.on_open(frame.remote_id())? { self.open_buffer.push_front(id); - trace!( - "{}: Buffered new inbound stream {} (total: {})", - self.id, - id, - self.open_buffer.len() + tracing::trace!( + connection=%self.id, + inbound_stream=%id, + inbound_buffer_len=%self.open_buffer.len(), + "Buffered new inbound stream" ); self.notifier_read.wake_next_stream(); } @@ -516,7 +525,11 @@ where self.guard_open()?; ready!(self.poll_flush(cx))?; - trace!("{}: Flushed substream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Flushed substream" + ); Poll::Ready(Ok(())) } @@ -554,7 +567,11 @@ where self.substreams.insert(id, SubstreamState::Open { buf }); Poll::Pending } else { - debug!("{}: Closed substream {} (half-close)", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream (half-close)" + ); self.substreams .insert(id, SubstreamState::SendClosed { buf }); Poll::Ready(Ok(())) @@ -569,7 +586,11 @@ where .insert(id, SubstreamState::RecvClosed { buf }); Poll::Pending } else { - debug!("{}: Closed substream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream" + ); self.substreams.insert(id, SubstreamState::Closed { buf }); Poll::Ready(Ok(())) } @@ -589,7 +610,7 @@ where match ready!(self.io.poll_ready_unpin(&mut Context::from_waker(&waker))) { Ok(()) => { let frame = frame(); - trace!("{}: Sending {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Sending frame"); match self.io.start_send_unpin(frame) { Ok(()) => Poll::Ready(Ok(())), Err(e) => Poll::Ready(self.on_error(e)), @@ -618,7 +639,11 @@ where // Perform any pending flush before reading. if let Some(id) = &stream_id { if self.pending_flush_open.contains(id) { - trace!("{}: Executing pending flush for {}.", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Executing pending flush for substream" + ); ready!(self.poll_flush(cx))?; self.pending_flush_open = Default::default(); } @@ -634,9 +659,9 @@ where if !self.notifier_read.wake_read_stream(*blocked_id) { // No task dedicated to the blocked stream woken, so schedule // this task again to have a chance at progress. - trace!( - "{}: No task to read from blocked stream. Waking current task.", - self.id + tracing::trace!( + connection=%self.id, + "No task to read from blocked stream. Waking current task." 
); cx.waker().clone().wake(); } else if let Some(id) = stream_id { @@ -664,7 +689,7 @@ where }; match ready!(self.io.poll_next_unpin(&mut Context::from_waker(&waker))) { Some(Ok(frame)) => { - trace!("{}: Received {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Received frame"); Poll::Ready(Ok(frame)) } Some(Err(e)) => Poll::Ready(self.on_error(e)), @@ -677,9 +702,10 @@ where let id = id.into_local(); if self.substreams.contains_key(&id) { - debug!( - "{}: Received unexpected `Open` frame for open substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Received unexpected `Open` frame for open substream", ); return self.on_error(io::Error::new( io::ErrorKind::Other, @@ -688,12 +714,17 @@ where } if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams exceeded: {}", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + max_substreams=%self.config.max_substreams, + "Maximum number of substreams exceeded" ); self.check_max_pending_frames()?; - debug!("{}: Pending reset for new stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for new substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); return Ok(None); @@ -706,11 +737,11 @@ where }, ); - debug!( - "{}: New inbound substream: {} (total {})", - self.id, - id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%id, + total_substreams=%self.substreams.len(), + "New inbound substream" ); Ok(Some(id)) @@ -721,23 +752,27 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::Closed { .. } => { - trace!( - "{}: Ignoring reset for mutually closed substream {}.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring reset for mutually closed substream" ); } SubstreamState::Reset { .. } => { - trace!( - "{}: Ignoring redundant reset for already reset substream {}", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring redundant reset for already reset substream" ); } SubstreamState::RecvClosed { buf } | SubstreamState::SendClosed { buf } | SubstreamState::Open { buf } => { - debug!("{}: Substream {} reset by remote.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream reset by remote" + ); self.substreams.insert(id, SubstreamState::Reset { buf }); // Notify tasks interested in reading from that stream, // so they may read the EOF. @@ -745,10 +780,10 @@ where } } } else { - trace!( - "{}: Ignoring `Reset` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Reset` for unknown substream, possibly dropped earlier" ); } } @@ -758,32 +793,36 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::RecvClosed { .. } | SubstreamState::Closed { .. 
} => { - debug!( - "{}: Ignoring `Close` frame for closed substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for closed substream" ); self.substreams.insert(id, state); } SubstreamState::Reset { buf } => { - debug!( - "{}: Ignoring `Close` frame for already reset substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for already reset substream" ); self.substreams.insert(id, SubstreamState::Reset { buf }); } SubstreamState::SendClosed { buf } => { - debug!( - "{}: Substream {} closed by remote (SendClosed -> Closed).", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (SendClosed -> Closed)" ); self.substreams.insert(id, SubstreamState::Closed { buf }); // Notify tasks interested in reading, so they may read the EOF. self.notifier_read.wake_read_stream(id); } SubstreamState::Open { buf } => { - debug!( - "{}: Substream {} closed by remote (Open -> RecvClosed)", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (Open -> RecvClosed)" ); self.substreams .insert(id, SubstreamState::RecvClosed { buf }); @@ -792,10 +831,10 @@ where } } } else { - trace!( - "{}: Ignoring `Close` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Close` for unknown substream, possibly dropped earlier." ); } } @@ -829,7 +868,11 @@ where /// Records a fatal error for the multiplexed I/O stream. fn on_error(&mut self, e: io::Error) -> io::Result { - debug!("{}: Multiplexed connection failed: {:?}", self.id, e); + tracing::debug!( + connection=%self.id, + "Multiplexed connection failed: {:?}", + e + ); self.status = Status::Err(io::Error::new(e.kind(), e.to_string())); self.pending_frames = Default::default(); self.substreams = Default::default(); @@ -872,11 +915,11 @@ where let state = if let Some(state) = self.substreams.get_mut(&id) { state } else { - trace!( - "{}: Dropping data {:?} for unknown substream {}", - self.id, - data, - id + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for unknown substream" ); return Ok(()); }; @@ -884,33 +927,41 @@ where let buf = if let Some(buf) = state.recv_buf_open() { buf } else { - trace!( - "{}: Dropping data {:?} for closed or reset substream {}", - self.id, - data, - id + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for closed or reset substream", ); return Ok(()); }; debug_assert!(buf.len() <= self.config.max_buffer_len); - trace!( - "{}: Buffering {:?} for stream {} (total: {})", - self.id, - data, - id, - buf.len() + 1 + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + data_buffer=%buf.len() + 1, + "Buffering data for substream" ); buf.push(data); self.notifier_read.wake_read_stream(id); if buf.len() > self.config.max_buffer_len { - debug!("{}: Frame buffer of stream {} is full.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Frame buffer of substream is full" + ); match self.config.max_buffer_behaviour { MaxBufferBehaviour::ResetStream => { let buf = buf.clone(); self.check_max_pending_frames()?; self.substreams.insert(id, SubstreamState::Reset { buf }); - debug!("{}: Pending reset for stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for stream" + ); self.pending_frames 
.push_front(Frame::Reset { stream_id: id }); } @@ -1179,7 +1230,10 @@ mod tests { #[test] fn max_buffer_behaviour() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(cfg: MplexConfig, overflow: NonZeroU8) { let mut r_buf = BytesMut::new(); @@ -1314,7 +1368,10 @@ mod tests { #[test] fn close_on_error() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(cfg: MplexConfig, num_streams: NonZeroU8) { let num_streams = cmp::min(cfg.max_substreams, num_streams.get() as usize); diff --git a/muxers/test-harness/Cargo.toml b/muxers/test-harness/Cargo.toml index 7eee7b3150b..05cef9ad49f 100644 --- a/muxers/test-harness/Cargo.toml +++ b/muxers/test-harness/Cargo.toml @@ -13,9 +13,9 @@ release = false [dependencies] libp2p-core = { workspace = true } futures = "0.3.29" -log = "0.4" futures-timer = "3.0.2" futures_ringbuf = "0.4.0" +tracing = "0.1.37" [lints] workspace = true diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index 233fe3a478c..16c71f414f0 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -149,20 +149,20 @@ async fn run( loop { match futures::future::select(dialer.next(), listener.next()).await { Either::Left((Some(Event::SetupComplete), _)) => { - log::info!("Dialer opened outbound stream"); + tracing::info!("Dialer opened outbound stream"); } Either::Left((Some(Event::ProtocolComplete), _)) => { - log::info!("Dialer completed protocol"); + tracing::info!("Dialer completed protocol"); dialer_complete = true } Either::Left((Some(Event::Timeout), _)) => { panic!("Dialer protocol timed out"); } Either::Right((Some(Event::SetupComplete), _)) => { - log::info!("Listener received inbound stream"); + tracing::info!("Listener received inbound stream"); } Either::Right((Some(Event::ProtocolComplete), _)) => { - log::info!("Listener completed protocol"); + tracing::info!("Listener completed protocol"); listener_complete = true } Either::Right((Some(Event::Timeout), _)) => { diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index c7c08365090..ec3d4b85c5b 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -15,7 +15,7 @@ futures = "0.3.29" libp2p-core = { workspace = true } thiserror = "1.0" yamux = "0.12" -log = "0.4" +tracing = "0.1.37" [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index 073a5723d2e..d10cdfa244c 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -81,6 +81,7 @@ where type Substream = Stream; type Error = Error; + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_inbound", skip(self, cx))] fn poll_inbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -97,6 +98,7 @@ where Poll::Pending } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_outbound", skip(self, cx))] fn poll_outbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -106,12 +108,14 @@ where Poll::Ready(Ok(Stream(stream))) } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_close", skip(self, cx))] fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.connection.poll_close(cx).map_err(Error)?); Poll::Ready(Ok(())) } + #[tracing::instrument(level = "trace", name = 
"StreamMuxer::poll", skip(self, cx))] fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -121,7 +125,10 @@ where let inbound_stream = ready!(this.poll_inner(cx))?; if this.inbound_stream_buffer.len() >= MAX_BUFFERED_INBOUND_STREAMS { - log::warn!("dropping {} because buffer is full", inbound_stream.0); + tracing::warn!( + stream=%inbound_stream.0, + "dropping stream because buffer is full" + ); drop(inbound_stream); } else { this.inbound_stream_buffer.push_back(inbound_stream); diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 9acad187586..cadddfa91fd 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -19,16 +19,16 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-request-response = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" -rand = "0.8" quick-protobuf = "0.8" +rand = "0.8" +tracing = "0.1.37" quick-protobuf-codec = { workspace = true } asynchronous-codec = "0.6.2" [dev-dependencies] async-std = { version = "1.10", features = ["attributes"] } -env_logger = "0.10" libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/autonat/src/behaviour.rs b/protocols/autonat/src/behaviour.rs index e9a73fd3fcb..06c945eb888 100644 --- a/protocols/autonat/src/behaviour.rs +++ b/protocols/autonat/src/behaviour.rs @@ -435,6 +435,7 @@ impl NetworkBehaviour for Behaviour { as NetworkBehaviour>::ConnectionHandler; type ToSwarm = Event; + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/autonat/src/behaviour/as_client.rs b/protocols/autonat/src/behaviour/as_client.rs index 6f37d32620b..7c9242d47cc 100644 --- a/protocols/autonat/src/behaviour/as_client.rs +++ b/protocols/autonat/src/behaviour/as_client.rs @@ -112,7 +112,7 @@ impl<'a> HandleInnerEvent for AsClient<'a> { response, }, } => { - log::debug!("Outbound dial-back request returned {:?}.", response); + tracing::debug!(?response, "Outbound dial-back request returned response"); let probe_id = self .ongoing_outbound @@ -154,10 +154,10 @@ impl<'a> HandleInnerEvent for AsClient<'a> { error, request_id, } => { - log::debug!( - "Outbound Failure {} when on dial-back request to peer {}.", + tracing::debug!( + %peer, + "Outbound Failure {} when on dial-back request to peer.", error, - peer ); let probe_id = self .ongoing_outbound @@ -275,13 +275,13 @@ impl<'a> AsClient<'a> { ) -> Result { let _ = self.last_probe.insert(Instant::now()); if addresses.is_empty() { - log::debug!("Outbound dial-back request aborted: No dial-back addresses."); + tracing::debug!("Outbound dial-back request aborted: No dial-back addresses"); return Err(OutboundProbeError::NoAddresses); } let server = match self.random_server() { Some(s) => s, None => { - log::debug!("Outbound dial-back request aborted: No qualified server."); + tracing::debug!("Outbound dial-back request aborted: No qualified server"); return Err(OutboundProbeError::NoServer); } }; @@ -293,7 +293,7 @@ impl<'a> AsClient<'a> { }, ); self.throttled_servers.push((server, Instant::now())); - log::debug!("Send dial-back request to peer {}.", server); + tracing::debug!(peer=%server, "Send dial-back request to peer"); self.ongoing_outbound.insert(request_id, probe_id); 
Ok(server) } @@ -344,10 +344,10 @@ impl<'a> AsClient<'a> { return None; } - log::debug!( - "Flipped assumed NAT status from {:?} to {:?}", - self.nat_status, - reported_status + tracing::debug!( + old_status=?self.nat_status, + new_status=?reported_status, + "Flipped assumed NAT status" ); let old_status = self.nat_status.clone(); diff --git a/protocols/autonat/src/behaviour/as_server.rs b/protocols/autonat/src/behaviour/as_server.rs index 65c9738647e..6185ecc50e2 100644 --- a/protocols/autonat/src/behaviour/as_server.rs +++ b/protocols/autonat/src/behaviour/as_server.rs @@ -110,9 +110,9 @@ impl<'a> HandleInnerEvent for AsServer<'a> { let probe_id = self.probe_id.next(); match self.resolve_inbound_request(peer, request) { Ok(addrs) => { - log::debug!( - "Inbound dial request from Peer {} with dial-back addresses {:?}.", - peer, + tracing::debug!( + %peer, + "Inbound dial request from peer with dial-back addresses {:?}", addrs ); @@ -140,10 +140,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { ]) } Err((status_text, error)) => { - log::debug!( - "Reject inbound dial request from peer {}: {}.", - peer, - status_text + tracing::debug!( + %peer, + status=%status_text, + "Reject inbound dial request from peer" ); let response = DialResponse { @@ -167,10 +167,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { error, request_id, } => { - log::debug!( - "Inbound Failure {} when on dial-back request from peer {}.", - error, - peer + tracing::debug!( + %peer, + "Inbound Failure {} when on dial-back request from peer", + error ); let probe_id = match self.ongoing_inbound.get(&peer) { @@ -206,10 +206,10 @@ impl<'a> AsServer<'a> { return None; } - log::debug!( - "Dial-back to peer {} succeeded at addr {:?}.", - peer, - address + tracing::debug!( + %peer, + %address, + "Dial-back to peer succeeded" ); let (probe_id, _, _, channel) = self.ongoing_inbound.remove(peer).unwrap(); @@ -232,11 +232,19 @@ impl<'a> AsServer<'a> { error: &DialError, ) -> Option { let (probe_id, _, _, channel) = peer.and_then(|p| self.ongoing_inbound.remove(&p))?; - log::debug!( - "Dial-back to peer {} failed with error {:?}.", - peer.unwrap(), - error - ); + + match peer { + Some(p) => tracing::debug!( + peer=%p, + "Dial-back to peer failed with error {:?}", + error + ), + None => tracing::debug!( + "Dial-back to non existent peer failed with error {:?}", + error + ), + }; + let response_error = ResponseError::DialError; let response = DialResponse { result: Err(response_error.clone()), diff --git a/protocols/autonat/src/protocol.rs b/protocols/autonat/src/protocol.rs index 904af6473e2..b28f70cadf4 100644 --- a/protocols/autonat/src/protocol.rs +++ b/protocols/autonat/src/protocol.rs @@ -129,7 +129,7 @@ impl DialRequest { { (peer_id, addrs) } else { - log::debug!("Received malformed dial message."); + tracing::debug!("Received malformed dial message"); return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid dial message", @@ -146,7 +146,7 @@ impl DialRequest { .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) @@ -207,7 +207,7 @@ impl TryFrom for ResponseError { proto::ResponseStatus::E_BAD_REQUEST => Ok(ResponseError::BadRequest), proto::ResponseStatus::E_INTERNAL_ERROR => Ok(ResponseError::InternalError), proto::ResponseStatus::OK => { - log::debug!("Received response with status code OK but expected error."); + tracing::debug!("Received response with status code OK but 
expected error"); Err(io::Error::new( io::ErrorKind::InvalidData, "invalid response error type", @@ -251,7 +251,7 @@ impl DialResponse { result: Err(ResponseError::try_from(status)?), }, _ => { - log::debug!("Received malformed response message."); + tracing::debug!("Received malformed response message"); return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid dial response message", diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 0e59585a416..9079f4f8a97 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -19,10 +19,10 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } thiserror = "1.0" +tracing = "0.1.37" void = "1" lru = "0.11.1" futures-bounded = { workspace = true } @@ -30,7 +30,6 @@ futures-bounded = { workspace = true } [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } libp2p-noise = { workspace = true } @@ -42,6 +41,7 @@ libp2p-swarm-test = { path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } rand = "0.8" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 6aecc596c71..d0b46abb0b4 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -268,9 +268,7 @@ impl NetworkBehaviour for Behaviour { match handler_event { Either::Left(handler::relayed::Event::InboundConnectNegotiated { remote_addrs }) => { - log::debug!( - "Attempting to hole-punch as dialer to {event_source} using {remote_addrs:?}" - ); + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as dialer"); let opts = DialOpts::peer_id(event_source) .addresses(remote_addrs) @@ -302,9 +300,7 @@ impl NetworkBehaviour for Behaviour { // Maybe treat these as transient and retry? } Either::Left(handler::relayed::Event::OutboundConnectNegotiated { remote_addrs }) => { - log::debug!( - "Attempting to hole-punch as listener to {event_source} using {remote_addrs:?}" - ); + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as dialer"); let opts = DialOpts::peer_id(event_source) .condition(dial_opts::PeerCondition::Always) diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index 9d600d234e5..b4daefce15f 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -110,7 +110,7 @@ impl Handler { )) .is_err() { - log::warn!( + tracing::warn!( "New inbound connect stream while still upgrading previous one. Replacing previous with new.", ); } @@ -142,7 +142,7 @@ impl Handler { )) .is_err() { - log::warn!( + tracing::warn!( "New outbound connect stream while still upgrading previous one. 
Replacing previous with new.", ); } @@ -224,6 +224,7 @@ impl ConnectionHandler for Handler { false } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index 95665843724..b8f90daf3a1 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -50,14 +50,14 @@ pub(crate) async fn handshake( .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) // Filter out relayed addresses. .filter(|a| { if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); + tracing::debug!(address=%a, "Dropping relayed address"); false } else { true diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 67c7116d706..d9cb60a01f6 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -68,14 +68,14 @@ pub(crate) async fn handshake( .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) // Filter out relayed addresses. .filter(|a| { if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); + tracing::debug!(address=%a, "Dropping relayed address"); false } else { true diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 1c5ddb5a972..a939fbccd11 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -30,10 +30,13 @@ use libp2p_relay as relay; use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn connect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut relay = build_relay(); let mut dst = build_client(); diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index 7acdd851655..9d5776c56b7 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -19,12 +19,12 @@ futures = "0.3.29" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" smallvec = "1.11.1" thiserror = "1.0.50" +tracing = "0.1.37" # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 5b6b89fea87..7fa9f3001b1 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -34,7 +34,6 @@ use libp2p_swarm::{ dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::warn; use smallvec::SmallVec; use std::collections::hash_map::{DefaultHasher, HashMap}; use std::task::{Context, Poll}; @@ -224,7 +223,7 @@ impl Floodsub { .any(|t| message.topics.iter().any(|u| t == u)); if self_subscribed { if let Err(e @ CuckooError::NotEnoughSpace) = self.received.add(&message) { - warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, @@ -363,7 +362,7 @@ impl NetworkBehaviour for Floodsub { Ok(InnerMessage::Rx(event)) => event, Ok(InnerMessage::Sent) => return, Err(e) => { - log::debug!("Failed to send floodsub message: {e}"); + tracing::debug!("Failed to send floodsub message: {e}"); self.events.push_back(ToSwarm::CloseConnection { peer_id: propagation_source, connection: CloseConnection::One(connection_id), @@ -415,7 +414,7 @@ impl NetworkBehaviour for Floodsub { Ok(false) => continue, // Message already existed. Err(e @ CuckooError::NotEnoughSpace) => { // Message added, but some other removed. - warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index d76f9a3e364..1c2758e44b4 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -28,7 +28,6 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["rand"] } libp2p-swarm = { workspace = true } -log = "0.4.20" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" @@ -36,6 +35,7 @@ regex = "1.10.2" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" smallvec = "1.11.1" +tracing = "0.1.37" unsigned-varint = { version = "0.7.2", features = ["asynchronous_codec"] } void = "1.0.2" @@ -44,13 +44,13 @@ prometheus-client = { workspace = true } [dev-dependencies] async-std = { version = "1.6.3", features = ["unstable"] } -env_logger = "0.10.0" hex = "0.4.2" libp2p-core = { workspace = true } libp2p-yamux = { workspace = true } libp2p-noise = { workspace = true } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 2a3a13ea6e7..f1069658b73 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -31,7 +31,6 @@ use std::{ use futures::StreamExt; use futures_ticker::Ticker; -use log::{debug, error, trace, warn}; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; @@ -523,14 +522,14 @@ where /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already /// subscribed. 
pub fn subscribe(&mut self, topic: &Topic) -> Result { - debug!("Subscribing to topic: {}", topic); + tracing::debug!(%topic, "Subscribing to topic"); let topic_hash = topic.hash(); if !self.subscription_filter.can_subscribe(&topic_hash) { return Err(SubscriptionError::NotAllowed); } if self.mesh.get(&topic_hash).is_some() { - debug!("Topic: {} is already in the mesh.", topic); + tracing::debug!(%topic, "Topic is already in the mesh"); return Ok(false); } @@ -548,7 +547,7 @@ where .into_protobuf(); for peer in peer_list { - debug!("Sending SUBSCRIBE to peer: {:?}", peer); + tracing::debug!(%peer, "Sending SUBSCRIBE to peer"); self.send_message(peer, event.clone()) .map_err(SubscriptionError::PublishError)?; } @@ -557,7 +556,7 @@ where // call JOIN(topic) // this will add new peers to the mesh for the topic self.join(&topic_hash); - debug!("Subscribed to topic: {}", topic); + tracing::debug!(%topic, "Subscribed to topic"); Ok(true) } @@ -565,11 +564,11 @@ where /// /// Returns [`Ok(true)`] if we were subscribed to this topic. pub fn unsubscribe(&mut self, topic: &Topic) -> Result { - debug!("Unsubscribing from topic: {}", topic); + tracing::debug!(%topic, "Unsubscribing from topic"); let topic_hash = topic.hash(); if self.mesh.get(&topic_hash).is_none() { - debug!("Already unsubscribed from topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic"); // we are not subscribed return Ok(false); } @@ -588,7 +587,7 @@ where .into_protobuf(); for peer in peer_list { - debug!("Sending UNSUBSCRIBE to peer: {}", peer.to_string()); + tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer"); self.send_message(peer, event.clone())?; } } @@ -597,7 +596,7 @@ where // this will remove the topic from the mesh self.leave(&topic_hash); - debug!("Unsubscribed from topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); Ok(true) } @@ -641,14 +640,14 @@ where if self.duplicate_cache.contains(&msg_id) { // This message has already been seen. We don't re-publish messages that have already // been published on the network. - warn!( - "Not publishing a message that has already been published. Msg-id {}", - msg_id + tracing::warn!( + message=%msg_id, + "Not publishing a message that has already been published" ); return Err(PublishError::Duplicate); } - trace!("Publishing message: {:?}", msg_id); + tracing::trace!(message=%msg_id, "Publishing message"); let topic_hash = raw_message.topic.clone(); @@ -689,7 +688,7 @@ where // Gossipsub peers if self.mesh.get(&topic_hash).is_none() { - debug!("Topic: {:?} not in the mesh", topic_hash); + tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); // If we have fanout peers add them to the map. if self.fanout.contains_key(&topic_hash) { for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { @@ -715,7 +714,7 @@ where // Add the new peers to the fanout and recipient peers self.fanout.insert(topic_hash.clone(), new_peers.clone()); for peer in new_peers { - debug!("Peer added to fanout: {:?}", peer); + tracing::debug!(%peer, "Peer added to fanout"); recipient_peers.insert(peer); } } @@ -746,7 +745,7 @@ where // Send to peers we know are subscribed to the topic. 
let msg_bytes = event.get_size(); for peer_id in recipient_peers.iter() { - trace!("Sending message to peer: {:?}", peer_id); + tracing::trace!(peer=%peer_id, "Sending message to peer"); self.send_message(*peer_id, event.clone())?; if let Some(m) = self.metrics.as_mut() { @@ -754,7 +753,7 @@ where } } - debug!("Published message: {:?}", &msg_id); + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { metrics.register_published_message(&topic_hash); @@ -795,9 +794,9 @@ where (raw_message.clone(), originating_peers) } None => { - warn!( - "Message not in cache. Ignoring forwarding. Message Id: {}", - msg_id + tracing::warn!( + message=%msg_id, + "Message not in cache. Ignoring forwarding" ); if let Some(metrics) = self.metrics.as_mut() { metrics.memcache_miss(); @@ -842,14 +841,14 @@ where } Ok(true) } else { - warn!("Rejected message not in cache. Message Id: {}", msg_id); + tracing::warn!(message=%msg_id, "Rejected message not in cache"); Ok(false) } } /// Adds a new peer to the list of explicitly connected peers. pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Adding explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Adding explicit peer"); self.explicit_peers.insert(*peer_id); @@ -859,7 +858,7 @@ where /// This removes the peer from explicitly connected peers, note that this does not disconnect /// the peer. pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Removing explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Removing explicit peer"); self.explicit_peers.remove(peer_id); } @@ -867,14 +866,14 @@ where /// created by this peer will be rejected. pub fn blacklist_peer(&mut self, peer_id: &PeerId) { if self.blacklisted_peers.insert(*peer_id) { - debug!("Peer has been blacklisted: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer has been blacklisted"); } } /// Removes a peer from the blacklist if it has previously been blacklisted. pub fn remove_blacklisted_peer(&mut self, peer_id: &PeerId) { if self.blacklisted_peers.remove(peer_id) { - debug!("Peer has been removed from the blacklist: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer has been removed from the blacklist"); } } @@ -943,11 +942,11 @@ where /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages. fn join(&mut self, topic_hash: &TopicHash) { - debug!("Running JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); // if we are already in the mesh, return if self.mesh.contains_key(topic_hash) { - debug!("JOIN: The topic is already in the mesh, ignoring JOIN"); + tracing::debug!(topic=%topic_hash, "JOIN: The topic is already in the mesh, ignoring JOIN"); return; } @@ -960,9 +959,9 @@ where // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, // removing the fanout entry. 
if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) { - debug!( - "JOIN: Removing peers from the fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Removing peers from the fanout for topic" ); // remove explicit peers, peers with negative scores, and backoffed peers @@ -975,9 +974,10 @@ where // Add up to mesh_n of them them to the mesh // NOTE: These aren't randomly added, currently FIFO let add_peers = std::cmp::min(peers.len(), self.config.mesh_n()); - debug!( - "JOIN: Adding {:?} peers from the fanout for topic: {:?}", - add_peers, topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Adding {:?} peers from the fanout for topic", + add_peers ); added_peers.extend(peers.iter().cloned().take(add_peers)); @@ -1012,7 +1012,7 @@ where ); added_peers.extend(new_peers.clone()); // add them to the mesh - debug!( + tracing::debug!( "JOIN: Inserting {:?} random peers into the mesh", new_peers.len() ); @@ -1027,7 +1027,7 @@ where for peer_id in added_peers { // Send a GRAFT control message - debug!("JOIN: Sending Graft message to peer: {:?}", peer_id); + tracing::debug!(peer=%peer_id, "JOIN: Sending Graft message to peer"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(&peer_id, topic_hash.clone()); } @@ -1055,7 +1055,7 @@ where m.set_mesh_peers(topic_hash, mesh_peers) } - debug!("Completed JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed JOIN for topic"); } /// Creates a PRUNE gossipsub action. @@ -1072,7 +1072,7 @@ where match self.connected_peers.get(peer).map(|v| &v.kind) { Some(PeerKind::Floodsub) => { - error!("Attempted to prune a Floodsub peer"); + tracing::error!("Attempted to prune a Floodsub peer"); } Some(PeerKind::Gossipsub) => { // GossipSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway @@ -1083,7 +1083,7 @@ where }; } None => { - error!("Attempted to Prune an unknown peer"); + tracing::error!("Attempted to Prune an unknown peer"); } _ => {} // Gossipsub 1.1 peer perform the `Prune` } @@ -1122,7 +1122,7 @@ where /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages. fn leave(&mut self, topic_hash: &TopicHash) { - debug!("Running LEAVE for topic {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running LEAVE for topic"); // If our mesh contains the topic, send prune to peers and delete it from the mesh if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) { @@ -1131,7 +1131,7 @@ where } for peer in peers { // Send a PRUNE control message - debug!("LEAVE: Sending PRUNE to peer: {:?}", peer); + tracing::debug!(%peer, "LEAVE: Sending PRUNE to peer"); let on_unsubscribe = true; let control = self.make_prune(topic_hash, &peer, self.config.do_px(), on_unsubscribe); @@ -1148,14 +1148,14 @@ where ); } } - debug!("Completed LEAVE for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic"); } /// Checks if the given peer is still connected and if not dials the peer again. 
fn check_explicit_peer_connection(&mut self, peer_id: &PeerId) { if !self.peer_topics.contains_key(peer_id) { // Connect to peer - debug!("Connecting to explicit peer {:?}", peer_id); + tracing::debug!(peer=%peer_id, "Connecting to explicit peer"); self.events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(*peer_id).build(), }); @@ -1193,9 +1193,10 @@ where fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec)>) { // We ignore IHAVE gossip from any peer whose score is below the gossip threshold if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { - debug!( - "IHAVE: ignoring peer {:?} with score below threshold [score = {}]", - peer_id, score + tracing::debug!( + peer=%peer_id, + %score, + "IHAVE: ignoring peer with score below threshold" ); return; } @@ -1204,25 +1205,27 @@ where let peer_have = self.count_received_ihave.entry(*peer_id).or_insert(0); *peer_have += 1; if *peer_have > self.config.max_ihave_messages() { - debug!( - "IHAVE: peer {} has advertised too many times ({}) within this heartbeat \ + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has advertised too many times ({}) within this heartbeat \ interval; ignoring", - peer_id, *peer_have + *peer_have ); return; } if let Some(iasked) = self.count_sent_iwant.get(peer_id) { if *iasked >= self.config.max_ihave_length() { - debug!( - "IHAVE: peer {} has already advertised too many messages ({}); ignoring", - peer_id, *iasked + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has already advertised too many messages ({}); ignoring", + *iasked ); return; } } - trace!("Handling IHAVE for peer: {:?}", peer_id); + tracing::trace!(peer=%peer_id, "Handling IHAVE for peer"); let mut iwant_ids = HashSet::new(); @@ -1244,9 +1247,9 @@ where for (topic, ids) in ihave_msgs { // only process the message if we are subscribed if !self.mesh.contains_key(&topic) { - debug!( - "IHAVE: Ignoring IHAVE - Not subscribed to topic: {:?}", - topic + tracing::debug!( + %topic, + "IHAVE: Ignoring IHAVE - Not subscribed to topic" ); continue; } @@ -1270,11 +1273,11 @@ where } // Send the list of IWANT control messages - debug!( - "IHAVE: Asking for {} out of {} messages from {}", + tracing::debug!( + peer=%peer_id, + "IHAVE: Asking for {} out of {} messages from peer", iask, - iwant_ids.len(), - peer_id + iwant_ids.len() ); // Ask in random order @@ -1297,9 +1300,9 @@ where Instant::now() + self.config.iwant_followup_time(), ); } - trace!( - "IHAVE: Asking for the following messages from {}: {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "IHAVE: Asking for the following messages from peer: {:?}", iwant_ids_vec ); @@ -1311,7 +1314,7 @@ where }, ); } - trace!("Completed IHAVE handling for peer: {:?}", peer_id); + tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); } /// Handles an IWANT control message. Checks our cache of messages. 
If the message exists it is @@ -1319,14 +1322,15 @@ where fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec) { // We ignore IWANT gossip from any peer whose score is below the gossip threshold if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { - debug!( - "IWANT: ignoring peer {:?} with score below threshold [score = {}]", - peer_id, score + tracing::debug!( + peer=%peer_id, + "IWANT: ignoring peer with score below threshold [score = {}]", + score ); return; } - debug!("Handling IWANT for peer: {:?}", peer_id); + tracing::debug!(peer=%peer_id, "Handling IWANT for peer"); // build a hashmap of available messages let mut cached_messages = HashMap::new(); @@ -1335,10 +1339,10 @@ where // cached_messages mapping if let Some((msg, count)) = self.mcache.get_with_iwant_counts(&id, peer_id) { if count > self.config.gossip_retransimission() { - debug!( - "IWANT: Peer {} has asked for message {} too many times; ignoring \ - request", - peer_id, &id + tracing::debug!( + peer=%peer_id, + message=%id, + "IWANT: Peer has asked for message too many times; ignoring request" ); } else { cached_messages.insert(id.clone(), msg.clone()); @@ -1347,7 +1351,7 @@ where } if !cached_messages.is_empty() { - debug!("IWANT: Sending cached messages to peer: {:?}", peer_id); + tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); // Send the messages to the peer let message_list: Vec<_> = cached_messages.into_iter().map(|entry| entry.1).collect(); @@ -1366,7 +1370,7 @@ where let msg_bytes = message.get_size(); if self.send_message(*peer_id, message).is_err() { - error!("Failed to send cached messages. Messages too large"); + tracing::error!("Failed to send cached messages. Messages too large"); } else if let Some(m) = self.metrics.as_mut() { // Sending of messages succeeded, register them on the internal metrics. for topic in topics.iter() { @@ -1374,13 +1378,13 @@ where } } } - debug!("Completed IWANT handling for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer"); } /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not, /// responds with PRUNE messages. 
fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec) { - debug!("Handling GRAFT message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer"); let mut to_prune_topics = HashSet::new(); @@ -1401,7 +1405,7 @@ where // we don't GRAFT to/from explicit peers; complain loudly if this happens if self.explicit_peers.contains(peer_id) { - warn!("GRAFT: ignoring request from direct peer {}", peer_id); + tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer"); // this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics to_prune_topics = topics.into_iter().collect(); // but don't PX @@ -1413,9 +1417,10 @@ where if let Some(peers) = self.mesh.get_mut(&topic_hash) { // if the peer is already in the mesh ignore the graft if peers.contains(peer_id) { - debug!( - "GRAFT: Received graft for peer {:?} that is already in topic {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%&topic_hash, + "GRAFT: Received graft for peer that is already in topic" ); continue; } @@ -1424,9 +1429,9 @@ where if let Some(backoff_time) = self.backoffs.get_backoff_time(&topic_hash, peer_id) { if backoff_time > now { - warn!( - "[Penalty] Peer attempted graft within backoff time, penalizing {}", - peer_id + tracing::warn!( + peer=%peer_id, + "[Penalty] Peer attempted graft within backoff time, penalizing" ); // add behavioural penalty if let Some((peer_score, ..)) = &mut self.peer_score { @@ -1457,10 +1462,11 @@ where // check the score if below_zero { // we don't GRAFT peers with negative score - debug!( - "GRAFT: ignoring peer {:?} with negative score [score = {}, \ - topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "GRAFT: ignoring peer with negative score" ); // we do send them PRUNE however, because it's a matter of protocol correctness to_prune_topics.insert(topic_hash.clone()); @@ -1479,9 +1485,10 @@ where } // add peer to the mesh - debug!( - "GRAFT: Mesh link added for peer: {:?} in topic: {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Mesh link added for peer in topic" ); if peers.insert(*peer_id) { @@ -1506,9 +1513,10 @@ where } else { // don't do PX when there is an unknown topic to avoid leaking our peers do_px = false; - debug!( - "GRAFT: Received graft for unknown topic {:?} from peer {:?}", - &topic_hash, peer_id + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Received graft for unknown topic from peer" ); // spam hardening: ignore GRAFTs for unknown topics continue; @@ -1524,9 +1532,9 @@ where .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe)) .collect(); // Send the prune messages to the peer - debug!( - "GRAFT: Not subscribed to topics - Sending PRUNE to peer: {}", - peer_id + tracing::debug!( + peer=%peer_id, + "GRAFT: Not subscribed to topics - Sending PRUNE to peer" ); if let Err(e) = self.send_message( @@ -1538,10 +1546,10 @@ where } .into_protobuf(), ) { - error!("Failed to send PRUNE: {:?}", e); + tracing::error!("Failed to send PRUNE: {:?}", e); } } - debug!("Completed GRAFT handling for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); } fn remove_peer_from_mesh( @@ -1556,10 +1564,10 @@ where if let Some(peers) = self.mesh.get_mut(topic_hash) { // remove the peer if it exists in the mesh if peers.remove(peer_id) { - debug!( - "PRUNE: Removing peer: {} from the mesh for topic: {}", - peer_id.to_string(), - 
topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "PRUNE: Removing peer from the mesh for topic" ); if let Some(m) = self.metrics.as_mut() { m.peers_removed(topic_hash, reason, 1) @@ -1599,7 +1607,7 @@ where peer_id: &PeerId, prune_data: Vec<(TopicHash, Vec, Option)>, ) { - debug!("Handling PRUNE message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling PRUNE message for peer"); let (below_threshold, score) = self.score_below_threshold(peer_id, |pst| pst.accept_px_threshold); for (topic_hash, px, backoff) in prune_data { @@ -1610,10 +1618,11 @@ where if !px.is_empty() { // we ignore PX from peers with insufficient score if below_threshold { - debug!( - "PRUNE: ignoring PX from peer {:?} with insufficient score \ - [score ={} topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "PRUNE: ignoring PX from peer with insufficient score" ); continue; } @@ -1630,7 +1639,7 @@ where } } } - debug!("Completed PRUNE handling for peer: {}", peer_id.to_string()); + tracing::debug!(peer=%peer_id, "Completed PRUNE handling for peer"); } fn px_connect(&mut self, mut px: Vec) { @@ -1670,17 +1679,17 @@ where raw_message: &mut RawMessage, propagation_source: &PeerId, ) -> bool { - debug!( - "Handling message: {:?} from peer: {}", - msg_id, - propagation_source.to_string() + tracing::debug!( + peer=%propagation_source, + message=%msg_id, + "Handling message from peer" ); // Reject any message from a blacklisted peer if self.blacklisted_peers.contains(propagation_source) { - debug!( - "Rejecting message from blacklisted peer: {}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Rejecting message from blacklisted peer" ); if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { peer_score.reject_message( @@ -1697,9 +1706,10 @@ where // Also reject any message that originated from a blacklisted peer if let Some(source) = raw_message.source.as_ref() { if self.blacklisted_peers.contains(source) { - debug!( - "Rejecting message from peer {} because of blacklisted source: {}", - propagation_source, source + tracing::debug!( + peer=%propagation_source, + %source, + "Rejecting message from peer because of blacklisted source" ); self.handle_invalid_message( propagation_source, @@ -1727,9 +1737,10 @@ where }; if self_published { - debug!( - "Dropping message {} claiming to be from self but forwarded from {}", - msg_id, propagation_source + tracing::debug!( + message=%msg_id, + source=%propagation_source, + "Dropping message claiming to be from self but forwarded from source" ); self.handle_invalid_message(propagation_source, raw_message, RejectReason::SelfOrigin); return false; @@ -1755,7 +1766,7 @@ where let message = match self.data_transform.inbound_transform(raw_message.clone()) { Ok(message) => message, Err(e) => { - debug!("Invalid message. Transform error: {:?}", e); + tracing::debug!("Invalid message. Transform error: {:?}", e); // Reject the message and return self.handle_invalid_message( propagation_source, @@ -1777,16 +1788,16 @@ where } if !self.duplicate_cache.insert(msg_id.clone()) { - debug!("Message already received, ignoring. 
Message: {}", msg_id); + tracing::debug!(message=%msg_id, "Message already received, ignoring"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); } self.mcache.observe_duplicate(&msg_id, propagation_source); return; } - debug!( - "Put message {:?} in duplicate_cache and resolve promises", - msg_id + tracing::debug!( + message=%msg_id, + "Put message in duplicate_cache and resolve promises" ); // Record the received message with the metrics @@ -1806,7 +1817,7 @@ where // Dispatch the message to the user if we are subscribed to any of the topics if self.mesh.contains_key(&message.topic) { - debug!("Sending received message to user"); + tracing::debug!("Sending received message to user"); self.events .push_back(ToSwarm::GenerateEvent(Event::Message { propagation_source: *propagation_source, @@ -1814,9 +1825,9 @@ where message, })); } else { - debug!( - "Received message on a topic we are not subscribed to: {:?}", - message.topic + tracing::debug!( + topic=%message.topic, + "Received message on a topic we are not subscribed to" ); return; } @@ -1832,9 +1843,9 @@ where ) .is_err() { - error!("Failed to forward message. Too large"); + tracing::error!("Failed to forward message. Too large"); } - debug!("Completed message handling for message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Completed message handling for message"); } } @@ -1876,10 +1887,10 @@ where subscriptions: &[Subscription], propagation_source: &PeerId, ) { - debug!( - "Handling subscriptions: {:?}, from source: {}", + tracing::debug!( + source=%propagation_source, + "Handling subscriptions: {:?}", subscriptions, - propagation_source.to_string() ); let mut unsubscribed_peers = Vec::new(); @@ -1887,9 +1898,9 @@ where let subscribed_topics = match self.peer_topics.get_mut(propagation_source) { Some(topics) => topics, None => { - error!( - "Subscription by unknown peer: {}", - propagation_source.to_string() + tracing::error!( + peer=%propagation_source, + "Subscription by unknown peer" ); return; } @@ -1907,10 +1918,10 @@ where { Ok(topics) => topics, Err(s) => { - error!( - "Subscription filter error: {}; ignoring RPC from peer {}", - s, - propagation_source.to_string() + tracing::error!( + peer=%propagation_source, + "Subscription filter error: {}; ignoring RPC from peer", + s ); return; } @@ -1924,10 +1935,10 @@ where match subscription.action { SubscriptionAction::Subscribe => { if peer_list.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding gossip peer: {} to topic: {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding gossip peer to topic" ); } @@ -1956,19 +1967,19 @@ where if peers.len() < self.config.mesh_n_low() && peers.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding peer {} to the mesh for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding peer to the mesh for topic" ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Subscribed, 1) } // send graft to the peer - debug!( - "Sending GRAFT to peer {} for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "Sending GRAFT to peer for topic" ); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(propagation_source, topic_hash.clone()); @@ 
-1985,10 +1996,10 @@ where } SubscriptionAction::Unsubscribe => { if peer_list.remove(propagation_source) { - debug!( - "SUBSCRIPTION: Removing gossip peer: {} from topic: {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Removing gossip peer from topic" ); } @@ -2044,7 +2055,7 @@ where ) .is_err() { - error!("Failed sending grafts. Message too large"); + tracing::error!("Failed sending grafts. Message too large"); } // Notify the application of the subscriptions @@ -2052,9 +2063,9 @@ where self.events.push_back(event); } - trace!( - "Completed handling subscriptions from source: {:?}", - propagation_source + tracing::trace!( + source=%propagation_source, + "Completed handling subscriptions from source" ); } @@ -2072,7 +2083,7 @@ where /// Heartbeat function which shifts the memcache and updates the mesh. fn heartbeat(&mut self) { - debug!("Starting heartbeat"); + tracing::debug!("Starting heartbeat"); let start = Instant::now(); self.heartbeat_ticks += 1; @@ -2128,10 +2139,11 @@ where } if peer_score < 0.0 { - debug!( - "HEARTBEAT: Prune peer {:?} with negative score [score = {}, topic = \ - {}]", - peer_id, peer_score, topic_hash + tracing::debug!( + peer=%peer_id, + score=%peer_score, + topic=%topic_hash, + "HEARTBEAT: Prune peer with negative score" ); let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new); @@ -2151,9 +2163,9 @@ where // too little peers - add some if peers.len() < self.config.mesh_n_low() { - debug!( - "HEARTBEAT: Mesh low. Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh low. Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_low() ); @@ -2176,7 +2188,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) } @@ -2185,9 +2197,9 @@ where // too many peers - remove some if peers.len() > self.config.mesh_n_high() { - debug!( - "HEARTBEAT: Mesh high. Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh high. Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_high() ); @@ -2270,7 +2282,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Outbound, peer_list.len()) } @@ -2337,9 +2349,10 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!( - "Opportunistically graft in topic {} with peers {:?}", - topic_hash, peer_list + tracing::debug!( + topic=%topic_hash, + "Opportunistically graft in topic with peers {:?}", + peer_list ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) @@ -2360,9 +2373,9 @@ where let fanout_ttl = self.config.fanout_ttl(); self.fanout_last_pub.retain(|topic_hash, last_pub_time| { if *last_pub_time + fanout_ttl < Instant::now() { - debug!( - "HEARTBEAT: Fanout topic removed due to timeout. 
Topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Fanout topic removed due to timeout" ); fanout.remove(topic_hash); return false; @@ -2385,9 +2398,9 @@ where match self.peer_topics.get(peer) { Some(topics) => { if !topics.contains(topic_hash) || peer_score < publish_threshold { - debug!( - "HEARTBEAT: Peer removed from fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Peer removed from fanout for topic" ); to_remove_peers.push(*peer); } @@ -2404,7 +2417,7 @@ where // not enough peers if peers.len() < self.config.mesh_n() { - debug!( + tracing::debug!( "HEARTBEAT: Fanout low. Contains: {:?} needs: {:?}", peers.len(), self.config.mesh_n() @@ -2427,7 +2440,7 @@ where } if self.peer_score.is_some() { - trace!("Mesh message deliveries: {:?}", { + tracing::trace!("Mesh message deliveries: {:?}", { self.mesh .iter() .map(|(t, peers)| { @@ -2466,7 +2479,7 @@ where // shift the memcache self.mcache.shift(); - debug!("Completed Heartbeat"); + tracing::debug!("Completed Heartbeat"); if let Some(metrics) = self.metrics.as_mut() { let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); metrics.observe_heartbeat_duration(duration); @@ -2486,7 +2499,7 @@ where // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list if message_ids.len() > self.config.max_ihave_length() { // we do the truncation (with shuffling) per peer below - debug!( + tracing::debug!( "too many messages for gossip; will truncate IHAVE list ({} messages)", message_ids.len() ); @@ -2515,7 +2528,7 @@ where }, ); - debug!("Gossiping IHAVE to {} peers.", to_msg_peers.len()); + tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); for peer in to_msg_peers { let mut peer_message_ids = message_ids.clone(); @@ -2610,7 +2623,7 @@ where ) .is_err() { - error!("Failed to send control messages. Message too large"); + tracing::error!("Failed to send control messages. Message too large"); } } @@ -2650,7 +2663,7 @@ where ) .is_err() { - error!("Failed to send prune messages. Message too large"); + tracing::error!("Failed to send prune messages. Message too large"); } } } @@ -2672,7 +2685,7 @@ where } } - debug!("Forwarding message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Forwarding message"); let mut recipient_peers = HashSet::new(); { @@ -2717,13 +2730,13 @@ where let msg_bytes = event.get_size(); for peer in recipient_peers.iter() { - debug!("Sending message: {:?} to peer {:?}", msg_id, peer); + tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); self.send_message(*peer, event.clone())?; if let Some(m) = self.metrics.as_mut() { m.msg_sent(&message.topic, msg_bytes); } } - debug!("Completed forwarding message"); + tracing::debug!("Completed forwarding message"); Ok(true) } else { Ok(false) @@ -2846,7 +2859,7 @@ where ) .is_err() { - error!("Failed to flush control pool. Message too large"); + tracing::error!("Failed to flush control pool. Message too large"); } } @@ -2913,7 +2926,7 @@ where if object_size + 2 > self.config.max_transmit_size() { // This should not be possible. All received and published messages have already // been vetted to fit within the size. 
- error!("Individual message too large to fragment"); + tracing::error!("Individual message too large to fragment"); return Err(PublishError::MessageTooLarge); } @@ -3018,9 +3031,9 @@ where if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3043,9 +3056,9 @@ where if other_established == 0 { // Ignore connections from blacklisted peers. if self.blacklisted_peers.contains(&peer_id) { - debug!("Ignoring connection from blacklisted peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); } else { - debug!("New peer connected: {}", peer_id); + tracing::debug!(peer=%peer_id, "New peer connected"); // We need to send our subscriptions to the newly-connected node. let mut subscriptions = vec![]; for topic_hash in self.mesh.keys() { @@ -3069,7 +3082,7 @@ where ) .is_err() { - error!("Failed to send subscriptions, message too large"); + tracing::error!("Failed to send subscriptions, message too large"); } } } @@ -3098,9 +3111,9 @@ where if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3137,7 +3150,7 @@ where } } else { // remove from mesh, topic_peers, peer_topic and the fanout - debug!("Peer disconnected: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer disconnected"); { let topics = match self.peer_topics.get(&peer_id) { Some(topics) => topics, @@ -3167,18 +3180,19 @@ where if let Some(peer_list) = self.topic_peers.get_mut(topic) { if !peer_list.remove(&peer_id) { // debugging purposes - warn!( - "Disconnected node: {} not in topic_peers peer list", - peer_id + tracing::warn!( + peer=%peer_id, + "Disconnected node: peer not in topic_peers" ); } if let Some(m) = self.metrics.as_mut() { m.set_topic_peers(topic, peer_list.len()) } } else { - warn!( - "Disconnected node: {} with topic: {:?} not in topic_peers", - &peer_id, &topic + tracing::warn!( + peer=%peer_id, + topic=%topic, + "Disconnected node: peer with topic not in topic_peers" ); } @@ -3230,18 +3244,18 @@ where if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - &peer_id, + tracing::trace!( + peer=%&peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_old ) } if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_new ) } @@ -3300,9 +3314,9 @@ where } if let PeerKind::NotSupported = kind { - debug!( - "Peer does not support gossipsub protocols. 
{}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Peer does not support gossipsub protocols" ); self.events .push_back(ToSwarm::GenerateEvent(Event::GossipsubNotSupported { @@ -3312,9 +3326,10 @@ where // Only change the value if the old value is Floodsub (the default set in // `NetworkBehaviour::on_event` with FromSwarm::ConnectionEstablished). // All other PeerKind changes are ignored. - debug!( - "New peer type found: {} for peer: {}", - kind, propagation_source + tracing::debug!( + peer=%propagation_source, + peer_type=%kind, + "New peer type found for peer" ); if let PeerKind::Floodsub = conn.kind { conn.kind = kind; @@ -3337,7 +3352,7 @@ where if let (true, _) = self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold) { - debug!("RPC Dropped from greylisted peer {}", propagation_source); + tracing::debug!(peer=%propagation_source, "RPC Dropped from greylisted peer"); return; } @@ -3353,11 +3368,11 @@ where } else { // log the invalid messages for (message, validation_error) in invalid_messages { - warn!( - "Invalid message. Reason: {:?} propagation_peer {} source {:?}", + tracing::warn!( + peer=%propagation_source, + source=?message.source, + "Invalid message from peer. Reason: {:?}", validation_error, - propagation_source.to_string(), - message.source ); } } @@ -3368,7 +3383,7 @@ where if self.config.max_messages_per_rpc().is_some() && Some(count) >= self.config.max_messages_per_rpc() { - warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count); + tracing::warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count); break; } self.handle_received_message(raw_message, &propagation_source); @@ -3411,6 +3426,7 @@ where } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -3566,7 +3582,7 @@ fn get_random_peers_dynamic( // if we have less than needed, return them let n = n_map(gossip_peers.len()); if gossip_peers.len() <= n { - debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); + tracing::debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); return gossip_peers.into_iter().collect(); } @@ -3574,7 +3590,7 @@ fn get_random_peers_dynamic( let mut rng = thread_rng(); gossip_peers.partial_shuffle(&mut rng, n); - debug!("RANDOM PEERS: Got {:?} peers", n); + tracing::debug!("RANDOM PEERS: Got {:?} peers", n); gossip_peers.into_iter().take(n).collect() } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index dba5db4c01d..cf24ed8d8dc 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -4656,7 +4656,10 @@ fn test_limit_number_of_message_ids_inside_ihave() { #[test] fn test_iwant_penalties() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index 827206afe8d..9538622c0dc 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -23,7 +23,6 @@ use crate::MessageId; use crate::ValidationError; use instant::Instant; use libp2p_identity::PeerId; -use log::debug; use std::collections::HashMap; /// Tracks recently sent `IWANT` 
messages and checks if peers respond to them. @@ -85,9 +84,10 @@ impl GossipPromises { if *expires < now { let count = result.entry(*peer_id).or_insert(0); *count += 1; - debug!( - "[Penalty] The peer {} broke the promise to deliver message {} in time!", - peer_id, msg + tracing::debug!( + peer=%peer_id, + message=%msg, + "[Penalty] The peer broke the promise to deliver message in time!" ); false } else { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 44258bb5394..4f3dd5c9f63 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -187,7 +187,7 @@ impl EnabledHandler { } // new inbound substream. Replace the current one, if it exists. - log::trace!("New inbound substream request"); + tracing::trace!("New inbound substream request"); self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); } @@ -258,7 +258,7 @@ impl EnabledHandler { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message)); } Poll::Ready(Some(Err(error))) => { - log::debug!("Failed to read from inbound stream: {error}"); + tracing::debug!("Failed to read from inbound stream: {error}"); // Close this side of the stream. If the // peer is still around, they will re-establish their // outbound stream i.e. our inbound stream. @@ -267,7 +267,7 @@ impl EnabledHandler { } // peer closed the stream Poll::Ready(None) => { - log::debug!("Inbound stream closed by remote"); + tracing::debug!("Inbound stream closed by remote"); self.inbound_substream = Some(InboundSubstreamState::Closing(substream)); } @@ -285,7 +285,7 @@ impl EnabledHandler { // Don't close the connection but just drop the inbound substream. // In case the remote has more to send, they will open up a new // substream. - log::debug!("Inbound substream error while closing: {e}"); + tracing::debug!("Inbound substream error while closing: {e}"); } self.inbound_substream = None; break; @@ -335,14 +335,16 @@ impl EnabledHandler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(e) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!( + "Failed to send message on outbound stream: {e}" + ); self.outbound_substream = None; break; } } } Poll::Ready(Err(e)) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!("Failed to send message on outbound stream: {e}"); self.outbound_substream = None; break; } @@ -361,7 +363,7 @@ impl EnabledHandler { Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(e)) => { - log::debug!("Failed to flush outbound stream: {e}"); + tracing::debug!("Failed to flush outbound stream: {e}"); self.outbound_substream = None; break; } @@ -418,7 +420,7 @@ impl ConnectionHandler for Handler { } }, Handler::Disabled(_) => { - log::debug!("Handler is disabled. Dropping message {:?}", message); + tracing::debug!(?message, "Handler is disabled. 
Dropping message"); } } } @@ -427,6 +429,7 @@ impl ConnectionHandler for Handler { matches!(self, Handler::Enabled(h) if h.in_mesh) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -469,7 +472,7 @@ impl ConnectionHandler for Handler { handler.inbound_substream_attempts += 1; if handler.inbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of inbound substreams attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -483,7 +486,7 @@ impl ConnectionHandler for Handler { handler.outbound_substream_attempts += 1; if handler.outbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of outbound substream attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -506,7 +509,7 @@ impl ConnectionHandler for Handler { error: StreamUpgradeError::Timeout, .. }) => { - log::debug!("Dial upgrade error: Protocol negotiation timeout"); + tracing::debug!("Dial upgrade error: Protocol negotiation timeout"); } ConnectionEvent::DialUpgradeError(DialUpgradeError { error: StreamUpgradeError::Apply(e), @@ -517,7 +520,7 @@ impl ConnectionHandler for Handler { .. }) => { // The protocol is not supported - log::debug!( + tracing::debug!( "The remote peer does not support gossipsub on this connection" ); *self = Handler::Disabled(DisabledHandler::ProtocolUnsupported { @@ -528,7 +531,7 @@ impl ConnectionHandler for Handler { error: StreamUpgradeError::Io(e), .. }) => { - log::debug!("Protocol negotiation failed: {e}") + tracing::debug!("Protocol negotiation failed: {e}") } ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index e85a5bf9c6a..ef4a93bc936 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -21,7 +21,6 @@ use crate::topic::TopicHash; use crate::types::{MessageId, RawMessage}; use libp2p_identity::PeerId; -use log::{debug, trace}; use std::collections::hash_map::Entry; use std::fmt::Debug; use std::{ @@ -87,7 +86,7 @@ impl MessageCache { entry.insert((msg, HashSet::default())); self.history[0].push(cache_entry); - trace!("Put message {:?} in mcache", message_id); + tracing::trace!(message=?message_id, "Put message in mcache"); true } } @@ -191,13 +190,13 @@ impl MessageCache { // If GossipsubConfig::validate_messages is true, the implementing // application has to ensure that Gossipsub::validate_message gets called for // each received message within the cache timeout time." - debug!( - "The message with id {} got removed from the cache without being validated.", - &entry.mid + tracing::debug!( + message=%&entry.mid, + "The message got removed from the cache without being validated." 
); } } - trace!("Remove message from the cache: {}", &entry.mid); + tracing::trace!(message=%&entry.mid, "Remove message from the cache"); self.iwant_counts.remove(&entry.mid); } diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index c6c918d6b2a..b370d2dfe06 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -26,7 +26,6 @@ use crate::time_cache::TimeCache; use crate::{MessageId, TopicHash}; use instant::Instant; use libp2p_identity::PeerId; -use log::{debug, trace, warn}; use std::collections::{hash_map, HashMap, HashSet}; use std::net::IpAddr; use std::time::Duration; @@ -274,13 +273,12 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::MessageDeficit); } - debug!( - "[Penalty] The peer {} has a mesh message deliveries deficit of {} in topic\ - {} and will get penalized by {}", - peer_id, - deficit, - topic, - p3 * topic_params.mesh_message_deliveries_weight + tracing::debug!( + peer=%peer_id, + %topic, + %deficit, + penalty=%topic_score, + "[Penalty] The peer has a mesh deliveries deficit and will be penalized" ); } @@ -326,10 +324,11 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::IPColocation); } - debug!( - "[Penalty] The peer {} gets penalized because of too many peers with the ip {}. \ - The surplus is {}. ", - peer_id, ip, surplus + tracing::debug!( + peer=%peer_id, + surplus_ip=%ip, + surplus=%surplus, + "[Penalty] The peer gets penalized because of too many peers with the same ip" ); score += p6 * self.params.ip_colocation_factor_weight; } @@ -347,9 +346,10 @@ impl PeerScore { pub(crate) fn add_penalty(&mut self, peer_id: &PeerId, count: usize) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { - debug!( - "[Penalty] Behavioral penalty for peer {}, count = {}.", - peer_id, count + tracing::debug!( + peer=%peer_id, + %count, + "[Penalty] Behavioral penalty for peer" ); peer_stats.behaviour_penalty += count as f64; } @@ -445,7 +445,7 @@ impl PeerScore { /// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) { - trace!("Add ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Add ip for peer"); let peer_stats = self.peer_stats.entry(*peer_id).or_default(); // Mark the peer as connected (currently the default is connected, but we don't want to @@ -462,20 +462,20 @@ impl PeerScore { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { peer_stats.known_ips.remove(ip); if let Some(peer_ids) = self.peer_ips.get_mut(ip) { - trace!("Remove ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer"); peer_ids.remove(peer_id); } else { - trace!( - "No entry in peer_ips for ip {} which should get removed for peer {}", - ip, - peer_id + tracing::trace!( + peer=%peer_id, + %ip, + "No entry in peer_ips for ip which should get removed for peer" ); } } else { - trace!( - "No peer_stats for peer {} which should remove the ip {}", - peer_id, - ip + tracing::trace!( + peer=%peer_id, + %ip, + "No peer_stats for peer which should remove the ip" ); } } @@ -594,7 +594,12 @@ impl PeerScore { // this should be the first delivery trace if record.status != DeliveryStatus::Unknown { - warn!("Unexpected delivery trace: Message from {} was first seen {}s ago and has a delivery status {:?}", from, record.first_seen.elapsed().as_secs(), 
record.status); + tracing::warn!( + peer=%from, + status=?record.status, + first_seen=?record.first_seen.elapsed().as_secs(), + "Unexpected delivery trace" + ); return; } @@ -611,9 +616,9 @@ impl PeerScore { /// Similar to `reject_message` except does not require the message id or reason for an invalid message. pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { - debug!( - "[Penalty] Message from {} rejected because of ValidationError or SelfOrigin", - from + tracing::debug!( + peer=%from, + "[Penalty] Message from peer rejected because of ValidationError or SelfOrigin" ); self.mark_invalid_message_delivery(from, topic_hash); @@ -778,10 +783,11 @@ impl PeerScore { if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) { - debug!( - "[Penalty] Peer {} delivered an invalid message in topic {} and gets penalized \ + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "[Penalty] Peer delivered an invalid message in topic and gets penalized \ for it", - peer_id, topic_hash ); topic_stats.invalid_message_deliveries += 1f64; } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 15d2f59755a..dcd509f6aa9 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -34,7 +34,6 @@ use futures::prelude::*; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; -use log::{debug, warn}; use quick_protobuf::Writer; use std::pin::Pin; use unsigned_varint::codec; @@ -169,7 +168,7 @@ impl GossipsubCodec { let from = match message.from.as_ref() { Some(v) => v, None => { - debug!("Signature verification failed: No source id given"); + tracing::debug!("Signature verification failed: No source id given"); return false; } }; @@ -177,7 +176,7 @@ impl GossipsubCodec { let source = match PeerId::from_bytes(from) { Ok(v) => v, Err(_) => { - debug!("Signature verification failed: Invalid Peer Id"); + tracing::debug!("Signature verification failed: Invalid Peer Id"); return false; } }; @@ -185,7 +184,7 @@ impl GossipsubCodec { let signature = match message.signature.as_ref() { Some(v) => v, None => { - debug!("Signature verification failed: No signature provided"); + tracing::debug!("Signature verification failed: No signature provided"); return false; } }; @@ -197,7 +196,7 @@ impl GossipsubCodec { _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { - warn!("Signature verification failed: No valid public key supplied"); + tracing::warn!("Signature verification failed: No valid public key supplied"); return false; } }, @@ -205,7 +204,9 @@ impl GossipsubCodec { // The key must match the peer_id if source != public_key.to_peer_id() { - warn!("Signature verification failed: Public key doesn't match source peer id"); + tracing::warn!( + "Signature verification failed: Public key doesn't match source peer id" + ); return false; } @@ -276,13 +277,17 @@ impl Decoder for GossipsubCodec { } ValidationMode::Anonymous => { if message.signature.is_some() { - warn!("Signature field was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Signature field was non-empty and anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SignaturePresent); } else if message.seqno.is_some() { - warn!("Sequence number was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Sequence number was non-empty and 
anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SequenceNumberPresent); } else if message.from.is_some() { - warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); + tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); invalid_kind = Some(ValidationError::MessageSourcePresent); } } @@ -308,7 +313,7 @@ impl Decoder for GossipsubCodec { // verify message signatures if required if verify_signature && !GossipsubCodec::verify_signature(&message) { - warn!("Invalid signature for received message"); + tracing::warn!("Invalid signature for received message"); // Build the invalid message (ignoring further validation of sequence number // and source) @@ -332,10 +337,10 @@ impl Decoder for GossipsubCodec { if seq_no.is_empty() { None } else if seq_no.len() != 8 { - debug!( - "Invalid sequence number length for received message. SeqNo: {:?} Size: {}", - seq_no, - seq_no.len() + tracing::debug!( + sequence_number=?seq_no, + sequence_length=%seq_no.len(), + "Invalid sequence number length for received message" ); let message = RawMessage { source: None, // don't bother inform the application @@ -355,7 +360,7 @@ impl Decoder for GossipsubCodec { } } else { // sequence number was not present - debug!("Sequence number not present but expected"); + tracing::debug!("Sequence number not present but expected"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), @@ -381,7 +386,7 @@ impl Decoder for GossipsubCodec { Ok(peer_id) => Some(peer_id), // valid peer id Err(_) => { // invalid peer id, add to invalid messages - debug!("Message source has an invalid PeerId"); + tracing::debug!("Message source has an invalid PeerId"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 9f883f12a1b..09c323d7904 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -20,7 +20,6 @@ use crate::types::Subscription; use crate::TopicHash; -use log::debug; use std::collections::{BTreeSet, HashMap, HashSet}; pub trait TopicSubscriptionFilter { @@ -66,7 +65,7 @@ pub trait TopicSubscriptionFilter { if self.allow_incoming_subscription(s) { true } else { - debug!("Filtered incoming subscription {:?}", s); + tracing::debug!(subscription=?s, "Filtered incoming subscription"); false } }); diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index e8577bc78cf..c8876428b4e 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -25,11 +25,10 @@ use libp2p_gossipsub as gossipsub; use libp2p_gossipsub::{MessageAuthenticity, ValidationMode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt as _; -use log::debug; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; use std::{task::Poll, time::Duration}; - +use tracing_subscriber::EnvFilter; struct Graph { nodes: SelectAll>, } @@ -129,14 +128,16 @@ async fn build_node() -> Swarm { #[test] fn multi_hop_propagation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(num_nodes: u8, seed: u64) -> TestResult { if !(2..=50).contains(&num_nodes) { return TestResult::discard(); } - 
debug!("number nodes: {:?}, seed: {:?}", num_nodes, seed); + tracing::debug!(number_of_nodes=%num_nodes, seed=%seed); async_std::task::block_on(async move { let mut graph = Graph::new_connected(num_nodes as usize, seed).await; diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 6db132b0189..9b0ef0eb139 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -18,20 +18,20 @@ futures-bounded = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" lru = "0.12.0" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" smallvec = "1.11.1" thiserror = "1.0" +tracing = "0.1.37" void = "1.0" either = "1.9.0" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10" libp2p-swarm-test = { path = "../../swarm-test" } libp2p-swarm = { workspace = true, features = ["macros"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index 4f017dd1a9e..75ddfc812bf 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -168,7 +168,7 @@ impl Behaviour { { for p in peers { if !self.connected.contains_key(&p) { - log::debug!("Not pushing to {p} because we are not connected"); + tracing::debug!(peer=%p, "Not pushing to peer because we are not connected"); continue; } @@ -286,9 +286,10 @@ impl NetworkBehaviour for Behaviour { // No-op, we already observed this address. } Entry::Occupied(mut already_observed) => { - log::info!( - "Our observed address on connection {id} changed from {} to {observed}", - already_observed.get() + tracing::info!( + old_address=%already_observed.get(), + new_address=%observed, + "Our observed address on connection {id} changed", ); *already_observed.get_mut() = observed.clone(); @@ -312,6 +313,7 @@ impl NetworkBehaviour for Behaviour { } } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index 966c7b378e0..963397e2274 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -36,10 +36,10 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; -use log::{warn, Level}; use smallvec::SmallVec; use std::collections::HashSet; use std::{io, task::Context, task::Poll, time::Duration}; +use tracing::Level; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; @@ -167,7 +167,7 @@ impl Handler { ) .is_err() { - warn!("Dropping inbound stream because we are at capacity"); + tracing::warn!("Dropping inbound stream because we are at capacity"); } else { self.exchanged_one_periodic_identify = true; } @@ -178,7 +178,9 @@ impl Handler { .try_push(protocol::recv_push(stream).map_ok(Success::ReceivedIdentifyPush)) .is_err() { - warn!("Dropping inbound identify push stream because we are at capacity"); + tracing::warn!( + "Dropping inbound identify push stream because we are at capacity" + ); } } } @@ 
-200,7 +202,7 @@ impl Handler { .try_push(protocol::recv_identify(stream).map_ok(Success::ReceivedIdentify)) .is_err() { - warn!("Dropping outbound identify stream because we are at capacity"); + tracing::warn!("Dropping outbound identify stream because we are at capacity"); } } future::Either::Right(stream) => { @@ -213,7 +215,9 @@ impl Handler { ) .is_err() { - warn!("Dropping outbound identify push stream because we are at capacity"); + tracing::warn!( + "Dropping outbound identify push stream because we are at capacity" + ); } } } @@ -312,6 +316,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -406,18 +411,20 @@ impl ConnectionHandler for Handler { | ConnectionEvent::ListenUpgradeError(_) | ConnectionEvent::RemoteProtocolsChange(_) => {} ConnectionEvent::LocalProtocolsChange(change) => { - let before = log::log_enabled!(Level::Debug) + let before = tracing::enabled!(Level::DEBUG) .then(|| self.local_protocols_to_string()) .unwrap_or_default(); let protocols_changed = self.local_supported_protocols.on_protocols_change(change); - let after = log::log_enabled!(Level::Debug) + let after = tracing::enabled!(Level::DEBUG) .then(|| self.local_protocols_to_string()) .unwrap_or_default(); if protocols_changed && self.exchanged_one_periodic_identify { - log::debug!( - "Supported listen protocols changed from [{before}] to [{after}], pushing to {}", - self.remote_peer_id + tracing::debug!( + peer=%self.remote_peer_id, + %before, + %after, + "Supported listen protocols changed, pushing to peer" ); self.events diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 803b79bf79c..c6b22b00c0a 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -25,7 +25,6 @@ use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; use libp2p_swarm::StreamProtocol; -use log::{debug, trace}; use std::convert::TryFrom; use std::io; use thiserror::Error; @@ -94,7 +93,7 @@ pub(crate) async fn send_identify(io: T, info: Info) -> Result>) -> Vec { .filter_map(|bytes| match Multiaddr::try_from(bytes) { Ok(a) => Some(a), Err(e) => { - debug!("Unable to parse multiaddr: {e:?}"); + tracing::debug!("Unable to parse multiaddr: {e:?}"); None } }) @@ -181,7 +180,7 @@ fn parse_protocols(protocols: Vec) -> Vec { .filter_map(|p| match StreamProtocol::try_from_owned(p) { Ok(p) => Some(p), Err(e) => { - debug!("Received invalid protocol from peer: {e}"); + tracing::debug!("Received invalid protocol from peer: {e}"); None } }) @@ -192,7 +191,7 @@ fn parse_public_key(public_key: Option>) -> Option { public_key.and_then(|key| match PublicKey::try_decode_protobuf(&key) { Ok(k) => Some(k), Err(e) => { - debug!("Unable to decode public key: {e:?}"); + tracing::debug!("Unable to decode public key: {e:?}"); None } }) @@ -202,7 +201,7 @@ fn parse_observed_addr(observed_addr: Option>) -> Option { observed_addr.and_then(|bytes| match Multiaddr::try_from(bytes) { Ok(a) => Some(a), Err(e) => { - debug!("Unable to parse observed multiaddr: {e:?}"); + tracing::debug!("Unable to parse observed multiaddr: {e:?}"); None } }) diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index 9a61ccccdd4..5cccc09d863 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -6,10 +6,13 @@ use libp2p_swarm_test::SwarmExt; use std::collections::HashSet; 
use std::iter; use std::time::{Duration, Instant}; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn periodic_identify() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -83,7 +86,9 @@ async fn periodic_identify() { } #[async_std::test] async fn only_emits_address_candidate_once_per_connection() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -153,7 +158,9 @@ async fn only_emits_address_candidate_once_per_connection() { #[async_std::test] async fn identify_push() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -203,7 +210,9 @@ async fn identify_push() { #[async_std::test] async fn discover_peer_after_disconnect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -254,7 +263,9 @@ async fn discover_peer_after_disconnect() { #[async_std::test] async fn configured_interval_starts_after_first_identify() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let identify_interval = Duration::from_secs(5); diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 213cdc9623d..9410ff0eebe 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -17,7 +17,6 @@ either = "1.9" fnv = "1.0" asynchronous-codec = "0.6" futures = "0.3.29" -log = "0.4" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } quick-protobuf = "0.8" @@ -33,10 +32,10 @@ futures-timer = "3.0.2" instant = "0.1.12" serde = { version = "1.0", optional = true, features = ["derive"] } thiserror = "1" +tracing = "0.1.37" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -env_logger = "0.10.0" futures-timer = "3.0" libp2p-identify = { path = "../identify" } libp2p-noise = { workspace = true } @@ -44,6 +43,7 @@ libp2p-swarm = { path = "../../swarm", features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] serde = ["dep:serde", "bytes/serde"] diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 0b187955e39..cc80b9c1be9 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -47,7 +47,6 @@ use libp2p_swarm::{ ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::{debug, info, warn}; use smallvec::SmallVec; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::fmt; @@ -56,6 +55,7 @@ use std::task::{Context, Poll, Waker}; use std::time::Duration; use std::vec; use thiserror::Error; +use tracing::Level; pub use crate::query::QueryStats; @@ -561,7 +561,7 @@ where RoutingUpdate::Success 
} kbucket::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + tracing::debug!(%peer, "Bucket full. Peer not added to routing table"); RoutingUpdate::Failed } kbucket::InsertResult::Pending { disconnected } => { @@ -1012,7 +1012,7 @@ where let num_connections = self.connections.len(); - log::debug!( + tracing::debug!( "Re-configuring {} established connection{}", num_connections, if num_connections > 1 { "s" } else { "" } @@ -1037,7 +1037,7 @@ where self.mode = match (self.external_addresses.as_slice(), self.mode) { ([], Mode::Server) => { - log::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); + tracing::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); Mode::Client } @@ -1047,11 +1047,11 @@ where Mode::Client } (confirmed_external_addresses, Mode::Client) => { - if log::log_enabled!(log::Level::Debug) { + if tracing::enabled!(Level::DEBUG) { let confirmed_external_addresses = to_comma_separated_list(confirmed_external_addresses); - log::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); + tracing::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); } Mode::Server @@ -1086,13 +1086,13 @@ where let local_id = self.kbuckets.local_key().preimage(); let others_iter = peers.filter(|p| &p.node_id != local_id); if let Some(query) = self.queries.get_mut(query_id) { - log::trace!("Request to {:?} in query {:?} succeeded.", source, query_id); + tracing::trace!(peer=%source, query=?query_id, "Request to peer in query succeeded"); for peer in others_iter.clone() { - log::trace!( - "Peer {:?} reported by {:?} in query {:?}.", - peer, - source, - query_id + tracing::trace!( + ?peer, + %source, + query=?query_id, + "Peer reported by source in query" ); let addrs = peer.multiaddrs.iter().cloned().collect(); query.inner.addresses.insert(peer.node_id, addrs); @@ -1282,7 +1282,10 @@ where self.queued_events.push_back(ToSwarm::GenerateEvent(event)); } kbucket::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + tracing::debug!( + %peer, + "Bucket full. Peer not added to routing table" + ); let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( Event::RoutablePeer { peer, address }, @@ -1319,7 +1322,7 @@ where /// Handles a finished (i.e. successful) query. fn query_finished(&mut self, q: Query) -> Option { let query_id = q.id(); - log::trace!("Query {:?} finished.", query_id); + tracing::trace!(query=?query_id, "Query finished"); let result = q.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1546,7 +1549,7 @@ where step: ProgressStep::first_and_last(), }), PutRecordContext::Replicate => { - debug!("Record replicated: {:?}", record.key); + tracing::debug!(record=?record.key, "Record replicated"); None } } @@ -1557,7 +1560,7 @@ where /// Handles a query that timed out. 
fn query_timeout(&mut self, query: Query) -> Option { let query_id = query.id(); - log::trace!("Query {:?} timed out.", query_id); + tracing::trace!(query=?query_id, "Query timed out"); let result = query.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1655,11 +1658,14 @@ where }), PutRecordContext::Replicate => match phase { PutRecordPhase::GetClosestPeers => { - warn!("Locating closest peers for replication failed: {:?}", err); + tracing::warn!( + "Locating closest peers for replication failed: {:?}", + err + ); None } PutRecordPhase::PutRecord { .. } => { - debug!("Replicating record failed: {:?}", err); + tracing::debug!("Replicating record failed: {:?}", err); None } }, @@ -1759,9 +1765,9 @@ where match self.record_filtering { StoreInserts::Unfiltered => match self.store.put(record.clone()) { Ok(()) => { - debug!( - "Record stored: {:?}; {} bytes", - record.key, + tracing::debug!( + record=?record.key, + "Record stored: {} bytes", record.value.len() ); self.queued_events.push_back(ToSwarm::GenerateEvent( @@ -1775,7 +1781,7 @@ where )); } Err(e) => { - info!("Record not stored: {:?}", e); + tracing::info!("Record not stored: {:?}", e); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), @@ -1828,7 +1834,7 @@ where match self.record_filtering { StoreInserts::Unfiltered => { if let Err(e) = self.store.add_provider(record) { - info!("Provider record not stored: {:?}", e); + tracing::info!("Provider record not stored: {:?}", e); return; } @@ -1859,9 +1865,10 @@ where // of the error is not possible (and also not truly desirable or ergonomic). // The error passed in should rather be a dedicated enum. if addrs.remove(address).is_ok() { - debug!( - "Address '{}' removed from peer '{}' due to error.", - address, peer_id + tracing::debug!( + peer=%peer_id, + %address, + "Address removed from peer due to error." ); } else { // Despite apparently having no reachable address (any longer), @@ -1873,10 +1880,11 @@ where // into the same bucket. This is handled transparently by the // `KBucketsTable` and takes effect through `KBucketsTable::take_applied_pending` // within `Behaviour::poll`. - debug!( - "Last remaining address '{}' of peer '{}' is unreachable.", - address, peer_id, - ) + tracing::debug!( + peer=%peer_id, + %address, + "Last remaining address of peer is unreachable." + ); } } @@ -1920,22 +1928,27 @@ where // Update routing table. if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(peer)).value() { if addrs.replace(old, new) { - debug!( - "Address '{}' replaced with '{}' for peer '{}'.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address replaced with new address for peer." ); } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as old address wasn't \ - present.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as old address wasn't present.", ); } } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as peer is not present in the \ - routing table.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as peer is not present in the \ + routing table." 
); } @@ -2073,7 +2086,6 @@ where connected_point, peer, self.mode, - connection_id, ); self.preload_new_handler(&mut handler, connection_id, peer); @@ -2097,7 +2109,6 @@ where connected_point, peer, self.mode, - connection_id, ); self.preload_new_handler(&mut handler, connection_id, peer); @@ -2253,12 +2264,11 @@ where } } } - HandlerEvent::QueryError { query_id, error } => { - log::debug!( - "Request to {:?} in query {:?} failed with {:?}", - source, - query_id, + tracing::debug!( + peer=%source, + query=?query_id, + "Request to peer in query failed with {:?}", error ); // If the query to which the error relates is still active, @@ -2346,7 +2356,7 @@ where *step = step.next(); } else { - log::trace!("Record with key {:?} not found at {}", key, source); + tracing::trace!(record=?key, %source, "Record not found at source"); if let Caching::Enabled { max_peers } = self.caching { let source_key = kbucket::Key::from(source); let target_key = kbucket::Key::from(key.clone()); @@ -2387,13 +2397,13 @@ where let peers = success.clone(); let finished = query.try_finish(peers.iter()); if !finished { - debug!( - "PutRecord query ({:?}) reached quorum ({}/{}) with response \ - from peer {} but could not yet finish.", - query_id, + tracing::debug!( + peer=%source, + query=?query_id, + "PutRecord query reached quorum ({}/{}) with response \ + from peer but could not yet finish.", peers.len(), quorum, - source, ); } } @@ -2403,6 +2413,7 @@ where }; } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index f75c59b64b0..522eebcba92 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -321,7 +321,9 @@ fn query_iter() { #[test] fn unresponsive_not_returned_direct() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); // Build one node. It contains fake addresses to non-existing nodes. We ask it to find a // random peer. We make sure that no fake address is returned. diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index fce77bc13e4..0f36800a904 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -33,10 +33,9 @@ use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamUpgradeError, - SubstreamProtocol, SupportedProtocols, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, + SupportedProtocols, }; -use log::trace; use std::collections::VecDeque; use std::task::Waker; use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; @@ -84,9 +83,6 @@ pub struct Handler { protocol_status: Option, remote_supported_protocols: SupportedProtocols, - - /// The ID of this connection. - connection_id: ConnectionId, } /// The states of protocol confirmation that a connection @@ -459,17 +455,20 @@ impl Handler { endpoint: ConnectedPoint, remote_peer_id: PeerId, mode: Mode, - connection_id: ConnectionId, ) -> Self { match &endpoint { ConnectedPoint::Dialer { .. 
} => { - log::debug!( - "Operating in {mode}-mode on new outbound connection to {remote_peer_id}" + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New outbound connection" ); } ConnectedPoint::Listener { .. } => { - log::debug!( - "Operating in {mode}-mode on new inbound connection to {remote_peer_id}" + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New inbound connection" ); } } @@ -486,7 +485,6 @@ impl Handler { pending_messages: Default::default(), protocol_status: None, remote_supported_protocols: Default::default(), - connection_id, } } @@ -550,16 +548,16 @@ impl Handler { ) }) { *s = InboundSubstreamState::Cancelled; - log::debug!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - Removed older substream waiting to be reused.", - self.remote_peer_id, + tracing::debug!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + Removed older substream waiting to be reused." ) } else { - log::warn!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - No older substream waiting to be reused. Dropping new substream.", - self.remote_peer_id, + tracing::warn!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + No older substream waiting to be reused. Dropping new substream." ); return; } @@ -688,12 +686,18 @@ impl ConnectionHandler for Handler { match &self.endpoint { ConnectedPoint::Dialer { .. } => { - log::debug!( - "Now operating in {new_mode}-mode on outbound connection with {peer}" + tracing::debug!( + %peer, + mode=%new_mode, + "Changed mode on outbound connection" ) } ConnectedPoint::Listener { local_addr, .. } => { - log::debug!("Now operating in {new_mode}-mode on inbound connection with {peer} assuming that one of our external addresses routes to {local_addr}") + tracing::debug!( + %peer, + mode=%new_mode, + local_address=%local_addr, + "Changed mode on inbound connection assuming that one of our external addresses routes to the local address") } } @@ -702,6 +706,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -787,8 +792,6 @@ impl ConnectionHandler for Handler { self.protocol_status = Some(compute_new_protocol_status( remote_supports_our_kademlia_protocols, self.protocol_status, - self.remote_peer_id, - self.connection_id, )) } } @@ -799,8 +802,6 @@ impl ConnectionHandler for Handler { fn compute_new_protocol_status( now_supported: bool, current_status: Option, - remote_peer_id: PeerId, - connection_id: ConnectionId, ) -> ProtocolStatus { let current_status = match current_status { None => { @@ -820,9 +821,9 @@ fn compute_new_protocol_status( } if now_supported { - log::debug!("Remote {remote_peer_id} now supports our kademlia protocol on connection {connection_id}"); + tracing::debug!("Remote now supports our kademlia protocol"); } else { - log::debug!("Remote {remote_peer_id} no longer supports our kademlia protocol on connection {connection_id}"); + tracing::debug!("Remote no longer supports our kademlia protocol"); } ProtocolStatus { @@ -997,7 +998,7 @@ impl futures::Stream for InboundSubstreamState { mut substream, } => match substream.poll_next_unpin(cx) { Poll::Ready(Some(Ok(KadRequestMsg::Ping))) => { - log::warn!("Kademlia PING messages are unsupported"); + tracing::warn!("Kademlia PING messages are unsupported"); *this = InboundSubstreamState::Closing(substream); } @@ -1071,7 +1072,7 @@ 
impl futures::Stream for InboundSubstreamState { return Poll::Ready(None); } Poll::Ready(Some(Err(e))) => { - trace!("Inbound substream error: {:?}", e); + tracing::trace!("Inbound substream error: {:?}", e); return Poll::Ready(None); } }, @@ -1172,6 +1173,7 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEven mod tests { use super::*; use quickcheck::{Arbitrary, Gen}; + use tracing_subscriber::EnvFilter; impl Arbitrary for ProtocolStatus { fn arbitrary(g: &mut Gen) -> Self { @@ -1184,15 +1186,12 @@ mod tests { #[test] fn compute_next_protocol_status_test() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(now_supported: bool, current: Option) { - let new = compute_new_protocol_status( - now_supported, - current, - PeerId::random(), - ConnectionId::new_unchecked(0), - ); + let new = compute_new_protocol_status(now_supported, current); match current { None => { diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 1cf14745675..247b12bb4cd 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -106,7 +106,7 @@ impl TryFrom for KadPeer { match Multiaddr::try_from(addr) { Ok(a) => addrs.push(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); } }; } diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index 5324e679ab9..f290a36b727 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -4,12 +4,15 @@ use libp2p_kad::store::MemoryStore; use libp2p_kad::{Behaviour, Config, Event, Mode}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; use Event::*; use MyBehaviourEvent::*; #[async_std::test] async fn server_gets_added_to_routing_table_by_client() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); @@ -32,7 +35,9 @@ async fn server_gets_added_to_routing_table_by_client() { #[async_std::test] async fn two_servers_add_each_other_to_routing_table() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut server1 = Swarm::new_ephemeral(MyBehaviour::new); let mut server2 = Swarm::new_ephemeral(MyBehaviour::new); @@ -71,7 +76,9 @@ async fn two_servers_add_each_other_to_routing_table() { #[async_std::test] async fn adding_an_external_addresses_activates_server_mode_on_existing_connections() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); @@ -105,7 +112,9 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti #[async_std::test] async fn set_client_to_server_mode() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); client.behaviour_mut().kad.set_mode(Some(Mode::Client)); diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 78e31bc9980..ef67a7e51b1 100644 --- 
a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -19,11 +19,11 @@ if-watch = "3.1.0" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" rand = "0.8.3" smallvec = "1.11.1" socket2 = { version = "0.5.5", features = ["all"] } tokio = { version = "1.33", default-features = false, features = ["net", "time"], optional = true} +tracing = "0.1.37" trust-dns-proto = { version = "0.23.0", default-features = false, features = ["mdns"] } void = "1.0.2" @@ -33,13 +33,13 @@ async-io = ["dep:async-io", "dep:async-std", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio", "async-std"] } libp2p-tcp = { workspace = true, features = ["tokio", "async-io"] } libp2p-yamux = { workspace = true } tokio = { version = "1.33", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "use-async-std" diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index a460d56ad18..e1652db4762 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -282,6 +282,7 @@ where .on_swarm_event(&event); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -310,18 +311,20 @@ where Ok(iface_state) => { e.insert(P::spawn(iface_state)); } - Err(err) => log::error!("failed to create `InterfaceState`: {}", err), + Err(err) => { + tracing::error!("failed to create `InterfaceState`: {}", err) + } } } } Ok(IfEvent::Down(inet)) => { if let Some(handle) = self.if_tasks.remove(&inet.addr()) { - log::info!("dropping instance {}", inet.addr()); + tracing::info!(instance=%inet.addr(), "dropping instance"); handle.abort(); } } - Err(err) => log::error!("if watch returned an error: {}", err), + Err(err) => tracing::error!("if watch returned an error: {}", err), } } // Emit discovered event. 
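The mdns changes here show the two patterns this migration applies throughout: values that were interpolated into `log` format strings become structured `tracing` fields (`%` records a field via its `Display` impl, `?` via `Debug`), and the `poll` functions gain a `#[tracing::instrument]` span. The standalone sketch below illustrates both patterns outside of libp2p; the function `drop_instance` and its values are hypothetical, and it assumes `tracing` plus `tracing-subscriber` (with the `env-filter` feature) as dependencies.

```rust
use std::net::{IpAddr, Ipv4Addr};
use tracing_subscriber::EnvFilter;

// Hypothetical stand-in for the `poll` methods annotated in this diff:
// `#[tracing::instrument]` opens a span for every call and records the
// non-skipped arguments as fields.
#[tracing::instrument(level = "trace", name = "example::drop_instance", skip(reason))]
fn drop_instance(addr: IpAddr, reason: &str) {
    // `%` uses the field's `Display` impl, `?` its `Debug` impl; the message
    // itself no longer interpolates values, unlike the old `log::info!` calls.
    tracing::info!(instance = %addr, "dropping instance");
    tracing::debug!(?reason, "interface event");
}

fn main() {
    // Same subscriber setup the converted tests and examples use:
    // the filter is taken from the RUST_LOG environment variable.
    let _ = tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .try_init();

    drop_instance(IpAddr::V4(Ipv4Addr::LOCALHOST), "interface went down");
}
```

Running with `RUST_LOG=trace` emits the span and both events; `try_init()` returns an error instead of panicking when a subscriber is already set, which is why the converted tests ignore its result.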
@@ -337,7 +340,7 @@ where { *cur_expires = cmp::max(*cur_expires, expiration); } else { - log::info!("discovered: {} {}", peer, addr); + tracing::info!(%peer, address=%addr, "discovered peer on address"); self.discovered_nodes.push((peer, addr.clone(), expiration)); discovered.push((peer, addr)); } @@ -353,7 +356,7 @@ where let mut expired = Vec::new(); self.discovered_nodes.retain(|(peer, addr, expiration)| { if *expiration <= now { - log::info!("expired: {} {}", peer, addr); + tracing::info!(%peer, address=%addr, "expired peer on address"); expired.push((*peer, addr.clone())); return false; } diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 47601088fdc..7fe97c38381 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -117,7 +117,7 @@ where listen_addresses: Arc>, query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, ) -> io::Result { - log::info!("creating instance on iface {}", addr); + tracing::info!(address=%addr, "creating instance on iface address"); let recv_socket = match addr { IpAddr::V4(addr) => { let socket = Socket::new(Domain::IPV4, Type::DGRAM, Some(socket2::Protocol::UDP))?; @@ -184,7 +184,7 @@ where } pub(crate) fn reset_timer(&mut self) { - log::trace!("reset timer on {:#?} {:#?}", self.addr, self.probe_state); + tracing::trace!(address=%self.addr, probe_state=?self.probe_state, "reset timer"); let interval = *self.probe_state.interval(); self.timeout = T::interval(interval); } @@ -207,9 +207,9 @@ where loop { // 1st priority: Low latency: Create packet ASAP after timeout. if this.timeout.poll_next_unpin(cx).is_ready() { - log::trace!("sending query on iface {}", this.addr); + tracing::trace!(address=%this.addr, "sending query on iface"); this.send_buffer.push_back(build_query()); - log::trace!("tick on {:#?} {:#?}", this.addr, this.probe_state); + tracing::trace!(address=%this.addr, probe_state=?this.probe_state, "tick"); // Stop to probe when the initial interval reach the query interval if let ProbeState::Probing(interval) = this.probe_state { @@ -228,11 +228,11 @@ where if let Some(packet) = this.send_buffer.pop_front() { match this.send_socket.poll_write(cx, &packet, this.mdns_socket()) { Poll::Ready(Ok(_)) => { - log::trace!("sent packet on iface {}", this.addr); + tracing::trace!(address=%this.addr, "sent packet on iface address"); continue; } Poll::Ready(Err(err)) => { - log::error!("error sending packet on iface {} {}", this.addr, err); + tracing::error!(address=%this.addr, "error sending packet on iface address {}", err); continue; } Poll::Pending => { @@ -265,10 +265,10 @@ where .map_ok(|(len, from)| MdnsPacket::new_from_bytes(&this.recv_buffer[..len], from)) { Poll::Ready(Ok(Ok(Some(MdnsPacket::Query(query))))) => { - log::trace!( - "received query from {} on {}", - query.remote_addr(), - this.addr + tracing::trace!( + address=%this.addr, + remote_address=%query.remote_addr(), + "received query from remote address on address" ); this.send_buffer.extend(build_query_response( @@ -283,10 +283,10 @@ where continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::Response(response))))) => { - log::trace!( - "received response from {} on {}", - response.remote_addr(), - this.addr + tracing::trace!( + address=%this.addr, + remote_address=%response.remote_addr(), + "received response from remote address on address" ); this.discovered @@ -300,10 +300,10 @@ where continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::ServiceDiscovery(disc))))) => { - log::trace!( - "received service 
discovery from {} on {}", - disc.remote_addr(), - this.addr + tracing::trace!( + address=%this.addr, + remote_address=%disc.remote_addr(), + "received service discovery from remote address on address" ); this.send_buffer @@ -314,10 +314,10 @@ where // No more bytes available on the socket to read } Poll::Ready(Err(err)) => { - log::error!("failed reading datagram: {}", err); + tracing::error!("failed reading datagram: {}", err); } Poll::Ready(Ok(Err(err))) => { - log::debug!("Parsing mdns packet failed: {:?}", err); + tracing::debug!("Parsing mdns packet failed: {:?}", err); } Poll::Ready(Ok(Ok(None))) | Poll::Pending => {} } diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 6a10497e69f..61fd5d329b9 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -134,7 +134,7 @@ pub(crate) fn build_query_response<'a>( records.push(txt_record); } Err(e) => { - log::warn!("Excluding address {} from response: {:?}", addr, e); + tracing::warn!(address=%addr, "Excluding address from response: {:?}", e); } } diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 6d45d92cdd9..549f70978af 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -24,17 +24,22 @@ use libp2p_mdns::{async_io::Behaviour, Config}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn test_discovery_async_std_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[async_std::test] async fn test_discovery_async_std_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -45,7 +50,9 @@ async fn test_discovery_async_std_ipv6() { #[async_std::test] async fn test_expired_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), @@ -78,7 +85,9 @@ async fn test_expired_async_std() { #[async_std::test] async fn test_no_expiration_on_close_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(120), query_interval: Duration::from_secs(10), diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index 50d6be0c00f..cf0d9f4bed4 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -22,17 +22,22 @@ use libp2p_mdns::{tokio::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn test_discovery_tokio_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[tokio::test] async fn test_discovery_tokio_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -43,7 +48,9 @@ 
async fn test_discovery_tokio_ipv6() { #[tokio::test] async fn test_expired_tokio() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index 8180b928691..18ff36a5cff 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -12,8 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] anyhow = "1" -clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" +clap = { version = "4.4.6", features = ["derive"] } futures = "0.3.29" futures-bounded = { workspace = true } futures-timer = "3.0" @@ -27,10 +26,11 @@ libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } libp2p-tcp = { workspace = true, features = ["tokio"] } libp2p-tls = { workspace = true } libp2p-yamux = { workspace = true } -log = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } tokio = { version = "1.33", default-features = false, features = ["macros", "rt", "rt-multi-thread"] } void = "1" diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs index 61371317ed2..9ac8f0a6cde 100644 --- a/protocols/perf/src/bin/perf.rs +++ b/protocols/perf/src/bin/perf.rs @@ -30,8 +30,8 @@ use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; use libp2p::SwarmBuilder; use libp2p_perf::{client, server}; use libp2p_perf::{Final, Intermediate, Run, RunParams, RunUpdate}; -use log::{error, info}; use serde::{Deserialize, Serialize}; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p perf client")] @@ -71,9 +71,9 @@ impl FromStr for Transport { #[tokio::main] async fn main() -> Result<()> { - env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")) - .format_timestamp_millis() - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opts = Opts::parse(); match opts { @@ -121,20 +121,20 @@ async fn server(server_address: SocketAddr) -> Result<()> { loop { match swarm.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {address}"); + tracing::info!(%address, "Listening on address"); } SwarmEvent::IncomingConnection { .. } => {} e @ SwarmEvent::IncomingConnectionError { .. } => { - error!("{e:?}"); + tracing::error!("{e:?}"); } SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. } => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); } SwarmEvent::ConnectionClosed { .. } => {} SwarmEvent::Behaviour(server::Event { .. 
}) => { - info!("Finished run",) + tracing::info!("Finished run",) } e => panic!("{e:?}"), } @@ -168,7 +168,7 @@ async fn client( let mut swarm = swarm().await?; tokio::spawn(async move { - info!("start benchmark: custom"); + tracing::info!("start benchmark: custom"); let start = Instant::now(); @@ -241,7 +241,7 @@ async fn connect( let duration = start.elapsed(); let duration_seconds = duration.as_secs_f64(); - info!("established connection in {duration_seconds:.4} s"); + tracing::info!(elapsed_time=%format!("{duration_seconds:.4} s")); Ok(server_peer_id) } @@ -259,7 +259,7 @@ async fn perf( id: _, result: Ok(RunUpdate::Intermediate(progressed)), }) => { - info!("{progressed}"); + tracing::info!("{progressed}"); let Intermediate { duration, @@ -288,7 +288,7 @@ async fn perf( let run = Run { params, duration }; - info!("{run}"); + tracing::info!("{run}"); Ok(run) } diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index a4dc354fac0..79c73d55102 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -145,6 +145,7 @@ impl NetworkBehaviour for Behaviour { .push_back(ToSwarm::GenerateEvent(Event { id, result })); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, _cx))] fn poll( &mut self, _cx: &mut Context<'_>, diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index a9bb0c7d483..d5d05284a85 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -153,6 +153,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index c699f706d87..370bc2ae188 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -105,6 +105,7 @@ impl NetworkBehaviour for Behaviour { })) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, _cx))] fn poll( &mut self, _cx: &mut Context<'_>, diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index 4e739995b67..7f262ac4820 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -29,7 +29,7 @@ use libp2p_swarm::{ }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use log::error; +use tracing::error; use void::Void; use crate::Run; @@ -96,7 +96,7 @@ impl ConnectionHandler for Handler { .try_push(crate::protocol::receive_send(protocol).boxed()) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity"); + tracing::warn!("Dropping inbound stream because we are at capacity"); } } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { info, .. 
}) => { @@ -115,6 +115,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/perf/tests/lib.rs b/protocols/perf/tests/lib.rs index 447d8a06110..017d475befd 100644 --- a/protocols/perf/tests/lib.rs +++ b/protocols/perf/tests/lib.rs @@ -24,10 +24,13 @@ use libp2p_perf::{ }; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn perf() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut server = Swarm::new_ephemeral(|_| server::Behaviour::new()); let server_peer_id = *server.local_peer_id(); diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index e1ec6d75871..28ae5d39b62 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -18,16 +18,16 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" rand = "0.8" +tracing = "0.1.37" void = "1.0" [dev-dependencies] async-std = "1.6.2" -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 3a92ef4b249..71ebcd97261 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -23,7 +23,6 @@ use futures::future::{BoxFuture, Either}; use futures::prelude::*; use futures_timer::Delay; use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_identity::PeerId; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; @@ -147,8 +146,6 @@ pub struct Handler { inbound: Option, /// Tracks the state of our handler. state: State, - /// The peer we are connected to. - peer: PeerId, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -166,9 +163,8 @@ enum State { impl Handler { /// Builds a new [`Handler`] with the given configuration. - pub fn new(config: Config, peer: PeerId) -> Self { + pub fn new(config: Config) -> Self { Handler { - peer, config, interval: Delay::new(Duration::new(0, 0)), pending_errors: VecDeque::with_capacity(2), @@ -225,6 +221,7 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, _: Void) {} + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -254,11 +251,11 @@ impl ConnectionHandler for Handler { match fut.poll_unpin(cx) { Poll::Pending => {} Poll::Ready(Err(e)) => { - log::debug!("Inbound ping error: {:?}", e); + tracing::debug!("Inbound ping error: {:?}", e); self.inbound = None; } Poll::Ready(Ok(stream)) => { - log::trace!("answered inbound ping from {}", self.peer); + tracing::trace!("answered inbound ping from peer"); // A ping from a remote peer has been answered, wait for the next. self.inbound = Some(protocol::recv_ping(stream).boxed()); @@ -269,7 +266,7 @@ impl ConnectionHandler for Handler { loop { // Check for outbound ping failures. 
if let Some(error) = self.pending_errors.pop_back() { - log::debug!("Ping failure: {:?}", error); + tracing::debug!("Ping failure: {:?}", error); self.failures += 1; @@ -291,8 +288,7 @@ impl ConnectionHandler for Handler { break; } Poll::Ready(Ok((stream, rtt))) => { - log::debug!("latency to {} is {}ms", self.peer, rtt.as_millis()); - + tracing::debug!(?rtt, "ping succeeded"); self.failures = 0; self.interval.reset(self.config.interval); self.outbound = Some(OutboundState::Idle(stream)); diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 3e3d14477b5..3e17db300e7 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -111,21 +111,21 @@ impl NetworkBehaviour for Behaviour { fn handle_established_inbound_connection( &mut self, _: ConnectionId, - peer: PeerId, + _: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.clone(), peer)) + Ok(Handler::new(self.config.clone())) } fn handle_established_outbound_connection( &mut self, _: ConnectionId, - peer: PeerId, + _: PeerId, _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.clone(), peer)) + Ok(Handler::new(self.config.clone())) } fn on_connection_handler_event( @@ -141,6 +141,7 @@ impl NetworkBehaviour for Behaviour { }) } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(e) = self.events.pop_back() { Poll::Ready(ToSwarm::GenerateEvent(e)) diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index bca55217a2a..7ad23af9b0a 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -21,23 +21,24 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8.4" static_assertions = "1" thiserror = "1.0" +tracing = "0.1.37" void = "1" [dev-dependencies] -env_logger = "0.10.0" libp2p-identity = { workspace = true, features = ["rand"] } libp2p-ping = { workspace = true } libp2p-plaintext = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros", "async-std"] } +libp2p-swarm-test = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } -libp2p-swarm-test = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 5b9f1fe5843..98e2a5a53bb 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -707,7 +707,11 @@ impl NetworkBehaviour for Behaviour { } } - fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, _cx))] + fn poll( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll>> { if let Some(to_swarm) = self.queued_actions.pop_front() { return Poll::Ready(to_swarm); } diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index a2ba268392f..361fb8ac333 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -409,7 +409,7 @@ impl Handler { )) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } @@ -432,7 +432,7 @@ impl Handler { ) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } self.active_connect_requests @@ -505,7 +505,7 @@ impl ConnectionHandler for Handler { )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::DenyReservationReq { @@ -519,7 +519,7 @@ impl ConnectionHandler for Handler { )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::NegotiateOutboundConnect { @@ -588,6 +588,7 @@ impl ConnectionHandler for Handler { Instant::now().duration_since(idle_at) <= Duration::from_secs(10) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -651,11 +652,11 @@ impl ConnectionHandler for Handler { )); } Poll::Ready(Err(e)) => { - log::debug!("Inbound stream operation timed out: {e}"); + tracing::debug!("Inbound stream operation timed out: {e}"); continue; } Poll::Ready(Ok(Err(e))) => { - log::debug!("Inbound stream operation failed: {e}"); + tracing::debug!("Inbound stream operation failed: {e}"); continue; } Poll::Pending => { diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 3e79b60ef97..d884f15c7eb 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -35,7 +35,6 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use log::debug; use std::collections::VecDeque; use std::task::{Context, Poll}; use std::time::Duration; @@ -189,7 +188,7 @@ impl Handler { if let Err(e) = to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); } @@ -220,8 +219,9 @@ impl Handler { .try_push(circuit.deny(proto::Status::NO_RESERVATION)) .is_err() { - log::warn!( - "Dropping existing inbound circuit request to be denied from {src_peer_id} in favor of new one." 
+ tracing::warn!( + peer=%src_peer_id, + "Dropping existing inbound circuit request to be denied from peer in favor of new one" ) } } @@ -270,6 +270,7 @@ impl ConnectionHandler for Handler { self.reservation.is_some() } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -319,7 +320,7 @@ impl ConnectionHandler for Handler { if let Err(e) = to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); continue; @@ -335,7 +336,7 @@ impl ConnectionHandler for Handler { outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), ))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); continue; @@ -367,7 +368,7 @@ impl ConnectionHandler for Handler { })) .is_err() { - log::debug!( + tracing::debug!( "Dropping newly established circuit because the listener is gone" ); continue; @@ -397,7 +398,7 @@ impl ConnectionHandler for Handler { outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), ))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); continue; @@ -437,11 +438,11 @@ impl ConnectionHandler for Handler { } }, Poll::Ready(Ok(Err(e))) => { - log::debug!("An inbound circuit request failed: {e}"); + tracing::debug!("An inbound circuit request failed: {e}"); continue; } Poll::Ready(Err(e)) => { - log::debug!("An inbound circuit request timed out: {e}"); + tracing::debug!("An inbound circuit request timed out: {e}"); continue; } Poll::Pending => {} @@ -460,11 +461,11 @@ impl ConnectionHandler for Handler { match self.inflight_outbound_circuit_deny_requests.poll_unpin(cx) { Poll::Ready(Ok(Ok(()))) => continue, Poll::Ready(Ok(Err(error))) => { - log::debug!("Denying inbound circuit failed: {error}"); + tracing::debug!("Denying inbound circuit failed: {error}"); continue; } Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { - log::debug!("Denying inbound circuit timed out"); + tracing::debug!("Denying inbound circuit timed out"); continue; } Poll::Pending => {} @@ -493,7 +494,7 @@ impl ConnectionHandler for Handler { .try_push(inbound_stop::handle_open_circuit(stream)) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { @@ -511,7 +512,7 @@ impl ConnectionHandler for Handler { .try_push(outbound_hop::make_reservation(stream)) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } } PendingRequest::Connect { @@ -525,7 +526,7 @@ impl ConnectionHandler for Handler { .try_push(outbound_hop::open_circuit(stream, dst_peer_id)) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } } } @@ -617,12 +618,12 @@ impl Reservation { if let Err(e) = to_listener .start_send(pending_msgs.pop_front().expect("Called !is_empty().")) { - debug!("Failed to sent pending message to listener: {:?}", e); + tracing::debug!("Failed to send pending message to listener: {:?}", e); *self = Reservation::None; } } Poll::Ready(Err(e)) => { - debug!("Channel to listener failed: {:?}", e); + tracing::debug!("Channel to listener failed: {:?}", e); *self = Reservation::None; } Poll::Pending => {} diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index 69ec495261f..951ae579a2d 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -58,7 +58,7 @@ pub struct ReservationReq { impl ReservationReq { pub async fn accept(self, addrs: Vec) -> Result<(), Error> { if addrs.is_empty() { - log::debug!( + tracing::debug!( "Accepting relay reservation without providing external addresses of local node. \ Thus the remote node might not be able to advertise its relayed address."
) diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index 39fc2b1f6dc..d57ab144e9f 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -38,10 +38,13 @@ use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[test] fn reservation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -82,7 +85,9 @@ fn reservation() { #[test] fn new_reservation_to_same_relay_replaces_old() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -173,7 +178,9 @@ fn new_reservation_to_same_relay_replaces_old() { #[test] fn connect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -257,7 +264,9 @@ async fn connection_established_to( #[test] fn handle_dial_failure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -276,7 +285,9 @@ fn handle_dial_failure() { #[test] fn propagate_reservation_error_to_listener() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -323,7 +334,9 @@ fn propagate_reservation_error_to_listener() { #[test] fn propagate_connect_error_to_unknown_peer_to_dialer() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -377,7 +390,9 @@ fn propagate_connect_error_to_unknown_peer_to_dialer() { #[test] fn reuse_connection() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index a56cf737656..c5f1c6e5729 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -21,24 +21,24 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } libp2p-request-response = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" thiserror = "1" +tracing = "0.1.37" void = "1" [dev-dependencies] -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } libp2p-noise = { workspace = true } libp2p-ping = { workspace = true } libp2p-identify = { workspace = true } -libp2p-yamux = { workspace = true } +libp2p-swarm-test = 
{ path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["tokio"] } +libp2p-yamux = { workspace = true } rand = "0.8" tokio = { version = "1.33", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } -libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index e4aedd9da7a..c6072533194 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -232,7 +232,7 @@ impl NetworkBehaviour for Behaviour { let registered = self.registered_namespaces.clone(); for ((rz_node, ns), ttl) in registered { if let Err(e) = self.register(ns, rz_node, Some(ttl)) { - log::warn!("refreshing registration failed: {e}") + tracing::warn!("refreshing registration failed: {e}") } } } diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 8911f2cea01..886b64cc829 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -194,7 +194,11 @@ impl NetworkBehaviour for Behaviour { request_id, error, }) => { - log::warn!("Inbound request {request_id} with peer {peer} failed: {error}"); + tracing::warn!( + %peer, + request=%request_id, + "Inbound request with peer failed: {error}" + ); continue; } diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index fec56365768..c2de88fd615 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -29,10 +29,13 @@ use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::convert::TryInto; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn given_successful_registration_then_successful_discovery() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -85,7 +88,9 @@ async fn given_successful_registration_then_successful_discovery() { #[tokio::test] async fn should_return_error_when_no_external_addresses() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let server = new_server(rendezvous::server::Config::default()).await; let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); @@ -100,7 +105,9 @@ async fn should_return_error_when_no_external_addresses() { #[tokio::test] async fn given_successful_registration_then_refresh_ttl() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -166,7 +173,9 @@ async fn given_successful_registration_then_refresh_ttl() { #[tokio::test] async fn given_successful_registration_then_refresh_external_addrs() { - let _ = env_logger::try_init(); + let _ 
= tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -217,7 +226,9 @@ async fn given_successful_registration_then_refresh_external_addrs() { #[tokio::test] async fn given_invalid_ttl_then_unsuccessful_registration() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -244,7 +255,9 @@ async fn given_invalid_ttl_then_unsuccessful_registration() { #[tokio::test] async fn discover_allows_for_dial_by_peer_id() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -299,7 +312,9 @@ async fn discover_allows_for_dial_by_peer_id() { #[tokio::test] async fn eve_cannot_register() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let mut robert = new_server(rendezvous::server::Config::default()).await; let mut eve = new_impersonating_client().await; @@ -325,7 +340,9 @@ async fn eve_cannot_register() { // test if charlie can operate as client and server simultaneously #[tokio::test] async fn can_combine_client_and_server() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -361,7 +378,9 @@ async fn can_combine_client_and_server() { #[tokio::test] async fn registration_on_clients_expire() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default().with_min_ttl(1)) diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index f0ed6ea5961..26a2d0ecc81 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -22,8 +22,8 @@ rand = "0.8" serde = { version = "1.0", optional = true} serde_json = { version = "1.0.107", optional = true } smallvec = "1.11.1" +tracing = "0.1.37" void = "1.0.2" -log = "0.4.20" futures-timer = "3.0.2" futures-bounded = { workspace = true } @@ -34,7 +34,6 @@ cbor = ["dep:serde", "dep:cbor4ii", "libp2p-swarm/macros"] [dev-dependencies] anyhow = "1.0.75" async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } @@ -42,6 +41,7 @@ rand = "0.8" libp2p-swarm-test = { path = "../../swarm-test" } futures_ringbuf = "0.4.0" serde = { version 
= "1.0", features = ["derive"]} +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index f4f5bf96c6c..ef4b5b44fe0 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -164,7 +164,7 @@ where .try_push(RequestId::Inbound(request_id), recv.boxed()) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } @@ -204,7 +204,7 @@ where .try_push(RequestId::Outbound(request_id), send.boxed()) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } } @@ -236,7 +236,7 @@ where } StreamUpgradeError::Apply(e) => void::unreachable(e), StreamUpgradeError::Io(e) => { - log::debug!( + tracing::debug!( "outbound stream for request {} failed: {e}, retrying", message.request_id ); @@ -386,6 +386,7 @@ where self.pending_outbound.push_back(request); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index f036fb85956..68a6b689fe5 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -859,7 +859,7 @@ where .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); } None => { - log::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); + tracing::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); } }, handler::Event::ResponseSent(request_id) => { @@ -940,7 +940,9 @@ where })); } else { // This happens when timeout is emitted before `read_request` finishes. - log::debug!("Inbound request timeout for an unknown request_id ({request_id})"); + tracing::debug!( + "Inbound request timeout for an unknown request_id ({request_id})" + ); } } handler::Event::InboundStreamFailed { request_id, error } => { @@ -955,7 +957,7 @@ where })); } else { // This happens when `read_request` fails. 
- log::debug!("Inbound failure is reported for an unknown request_id ({request_id}): {error}"); + tracing::debug!("Inbound failure is reported for an unknown request_id ({request_id}): {error}"); } } } diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs index 2256403c0e4..2dc82b2e0c5 100644 --- a/protocols/request-response/tests/error_reporting.rs +++ b/protocols/request-response/tests/error_reporting.rs @@ -13,10 +13,13 @@ use request_response::{ use std::pin::pin; use std::time::Duration; use std::{io, iter}; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn report_outbound_failure_on_read_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); @@ -70,7 +73,9 @@ async fn report_outbound_failure_on_read_response() { #[async_std::test] async fn report_outbound_failure_on_write_request() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); @@ -111,7 +116,9 @@ async fn report_outbound_failure_on_write_request() { #[async_std::test] async fn report_outbound_timeout_on_read_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // `swarm1` needs to have a bigger timeout to avoid racing let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); @@ -156,7 +163,9 @@ async fn report_outbound_timeout_on_read_response() { #[async_std::test] async fn report_inbound_failure_on_read_request() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); @@ -191,7 +200,9 @@ async fn report_inbound_failure_on_read_request() { #[async_std::test] async fn report_inbound_failure_on_write_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); @@ -255,7 +266,9 @@ async fn report_inbound_failure_on_write_response() { #[async_std::test] async fn report_inbound_timeout_on_write_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // `swarm2` needs to have a bigger timeout to avoid racing let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index c751dc2b3dd..b9e7878a78b 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -29,11 +29,14 @@ use libp2p_swarm_test::SwarmExt; use rand::{self, Rng}; use serde::{Deserialize, Serialize}; use std::{io, iter}; +use tracing_subscriber::EnvFilter; #[async_std::test] #[cfg(feature = "cbor")] async fn is_response_outbound() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ping = Ping("ping".to_string().into_bytes()); let offline_peer = PeerId::random(); diff 
--git a/protocols/upnp/Cargo.toml b/protocols/upnp/Cargo.toml index 7ada98e2d19..30d50923009 100644 --- a/protocols/upnp/Cargo.toml +++ b/protocols/upnp/Cargo.toml @@ -16,9 +16,9 @@ futures-timer = "3.0.2" igd-next = "0.14.2" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } -log = "0.4.19" -void = "1.0.2" tokio = { version = "1.33", default-features = false, features = ["rt"], optional = true } +tracing = "0.1.37" +void = "1.0.2" [features] tokio = ["igd-next/aio_tokio", "dep:tokio"] diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs index 3d83545b952..5410b8dd13f 100644 --- a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -175,9 +175,9 @@ impl MappingList { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -190,9 +190,9 @@ impl MappingList { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -261,7 +261,7 @@ impl NetworkBehaviour for Behaviour { let (addr, protocol) = match multiaddr_to_socketaddr_protocol(multiaddr.clone()) { Ok(addr_port) => addr_port, Err(()) => { - log::debug!("multiaddress not supported for UPnP {multiaddr}"); + tracing::debug!("multiaddress not supported for UPnP {multiaddr}"); return; } }; @@ -271,7 +271,11 @@ impl NetworkBehaviour for Behaviour { .iter() .find(|(mapping, _state)| mapping.internal_addr.port() == addr.port()) { - log::debug!("port from multiaddress {multiaddr} is already being mapped to another multiaddr: {}", mapping.multiaddr); + tracing::debug!( + multiaddress=%multiaddr, + mapped_multiaddress=%mapping.multiaddr, + "port from multiaddress is already being mapped" + ); return; } @@ -302,9 +306,9 @@ impl NetworkBehaviour for Behaviour { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -312,14 +316,17 @@ impl NetworkBehaviour for Behaviour { self.mappings.insert(mapping, MappingState::Pending); } GatewayState::GatewayNotFound => { - log::debug!( - "network gateway not found, UPnP port mapping of {multiaddr} discarded" + tracing::debug!( + multiaddress=%multiaddr, + "network gateway not found, UPnP port mapping of multiaddress discarded" ); } GatewayState::NonRoutableGateway(addr) => { - log::debug!( - "the network gateway is not exposed to the public network, \ - it's ip is {addr}. UPnP port mapping of {multiaddr} discarded" + tracing::debug!( + multiaddress=%multiaddr, + network_gateway_ip=%addr, + "the network gateway is not exposed to the public network. 
/ + UPnP port mapping of multiaddress discarded" ); } }; @@ -334,9 +341,9 @@ impl NetworkBehaviour for Behaviour { .sender .try_send(GatewayRequest::RemoveMapping(mapping.clone())) { - log::debug!( - "could not request port removal for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", err ); } @@ -367,6 +374,7 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -387,9 +395,9 @@ impl NetworkBehaviour for Behaviour { if !is_addr_global(gateway.external_addr) { self.state = GatewayState::NonRoutableGateway(gateway.external_addr); - log::debug!( - "the gateway is not routable, its address is {}", - gateway.external_addr + tracing::debug!( + gateway_address=%gateway.external_addr, + "the gateway is not routable" ); return Poll::Ready(ToSwarm::GenerateEvent( Event::NonRoutableGateway, @@ -398,7 +406,7 @@ impl NetworkBehaviour for Behaviour { self.state = GatewayState::Available(gateway); } Err(err) => { - log::debug!("could not find gateway: {err}"); + tracing::debug!("could not find gateway: {err}"); self.state = GatewayState::GatewayNotFound; return Poll::Ready(ToSwarm::GenerateEvent(Event::GatewayNotFound)); } @@ -426,20 +434,20 @@ impl NetworkBehaviour for Behaviour { self.pending_events.push_back(Event::NewExternalAddr( external_multiaddr.clone(), )); - log::debug!( - "succcessfully mapped UPnP {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully mapped UPnP for protocol" ); return Poll::Ready(ToSwarm::ExternalAddrConfirmed( external_multiaddr, )); } MappingState::Active(_) => { - log::debug!( - "succcessfully renewed UPnP mapping {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully renewed UPnP mapping for protocol" ); } _ => unreachable!(), @@ -452,10 +460,10 @@ impl NetworkBehaviour for Behaviour { .expect("mapping should exist") { MappingState::Active(_) => { - log::debug!( - "failed to remap UPnP mapped {} for {} protocol: {err}", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to remap UPnP mapped for protocol: {err}" ); let external_multiaddr = mapping.external_addr(gateway.external_addr); @@ -467,10 +475,10 @@ impl NetworkBehaviour for Behaviour { )); } MappingState::Pending => { - log::debug!( - "failed to map upnp mapped {} for {} protocol: {err}", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to map UPnP mapped for protocol: {err}" ); } _ => { @@ -479,28 +487,28 @@ impl NetworkBehaviour for Behaviour { } } GatewayEvent::Removed(mapping) => { - log::debug!( - "succcessfully removed UPnP mapping {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully removed UPnP mapping for protocol" ); self.mappings .remove(&mapping) .expect("mapping should exist"); } GatewayEvent::RemovalFailure(mapping, err) => { - log::debug!( - "could not remove UPnP mapping {} for {} protocol: {err}", - mapping.internal_addr, - 
mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "could not remove UPnP mapping for protocol: {err}" ); if let Err(err) = gateway .sender .try_send(GatewayRequest::RemoveMapping(mapping.clone())) { - log::debug!( - "could not request port removal for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", err ); } diff --git a/swarm-test/Cargo.toml b/swarm-test/Cargo.toml index cee783983f7..12f8be2a1d8 100644 --- a/swarm-test/Cargo.toml +++ b/swarm-test/Cargo.toml @@ -20,8 +20,8 @@ libp2p-swarm = { workspace = true, features = ["async-std"] } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } futures = "0.3.29" -log = "0.4.20" rand = "0.8.5" +tracing = "0.1.37" futures-timer = "3.0.2" [lints] diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index 85bd9c22e9a..ee4058d530d 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -256,10 +256,16 @@ where listener_done = true; } Either::Left((other, _)) => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); } Either::Right((other, _)) => { - log::debug!("Ignoring event from listener {:?}", other); + tracing::debug!( + listener=?other, + "Ignoring event from listener" + ); } } @@ -277,7 +283,10 @@ where endpoint, peer_id, .. } => (endpoint.get_remote_address() == &addr).then_some(peer_id), other => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); None } }) @@ -316,7 +325,7 @@ where { Either::Left(((), _)) => panic!("Swarm did not emit an event within 10s"), Either::Right((event, _)) => { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); event } @@ -333,7 +342,7 @@ where async fn loop_on_next(mut self) { while let Some(event) = self.next().await { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); } } } diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 116604987f8..fb28ff34d12 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -15,18 +15,18 @@ either = "1.9.0" fnv = "1.0" futures = "0.3.29" futures-timer = "3.0.2" +getrandom = { version = "0.2.9", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature instant = "0.1.12" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-swarm-derive = { workspace = true, optional = true } -log = "0.4" +multistream-select = { workspace = true } +once_cell = "1.18.0" rand = "0.8" smallvec = "1.11.1" +tracing = "0.1.37" void = "1" wasm-bindgen-futures = { version = "0.4.37", optional = true } -getrandom = { version = "0.2.9", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature -once_cell = "1.18.0" -multistream-select = { workspace = true } [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] async-std = { version = "1.6.2", optional = true } @@ -41,7 +41,6 @@ wasm-bindgen = ["dep:wasm-bindgen-futures", "dep:getrandom"] [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } either = "1.9.0" -env_logger = "0.10" futures = "0.3.29" libp2p-identify = { path = "../protocols/identify" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. 
libp2p-identity = { workspace = true, features = ["ed25519"] } @@ -56,6 +55,7 @@ void = "1" once_cell = "1.18.0" trybuild = "1.0.85" tokio = { version = "1.33.0", features = ["time", "rt", "macros", "rt-multi-thread"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "swarm_derive" diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs index 14cdb301fbd..579f46fe486 100644 --- a/swarm/src/behaviour/external_addresses.rs +++ b/swarm/src/behaviour/external_addresses.rs @@ -37,7 +37,7 @@ impl ExternalAddresses { self.addresses.remove(pos); self.push_front(addr); - log::debug!("Refreshed external address {addr}"); + tracing::debug!(address=%addr, "Refreshed external address"); return false; // No changes to our external addresses. } @@ -47,7 +47,11 @@ impl ExternalAddresses { if self.addresses.len() > MAX_LOCAL_EXTERNAL_ADDRS { let expired = self.addresses.pop().expect("list to be not empty"); - log::debug!("Removing previously confirmed external address {expired} because we reached the limit of {MAX_LOCAL_EXTERNAL_ADDRS} addresses"); + tracing::debug!( + external_address=%expired, + address_limit=%MAX_LOCAL_EXTERNAL_ADDRS, + "Removing previously confirmed external address because we reached the address limit" + ); } return true; diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index ee2729e0c82..35cc71d5354 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -192,7 +192,6 @@ where ProtocolsChange::Added(ProtocolsAdded::from_set(&initial_protocols)), )); } - Connection { muxing: muxer, handler, @@ -235,6 +234,7 @@ where /// Polls the handler and the substream, forwarding events from the former to the latter and /// vice versa. + #[tracing::instrument(level = "debug", name = "Connection::poll", skip(self, cx))] pub(crate) fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -252,6 +252,7 @@ where remote_supported_protocols, idle_timeout, stream_counter, + .. } = self.get_mut(); loop { @@ -346,15 +347,15 @@ where continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::Io(e))))) => { - log::debug!("failed to upgrade inbound stream: {e}"); + tracing::debug!("failed to upgrade inbound stream: {e}"); continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::NegotiationFailed)))) => { - log::debug!("no protocol could be agreed upon for inbound stream"); + tracing::debug!("no protocol could be agreed upon for inbound stream"); continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::Timeout)))) => { - log::debug!("inbound stream upgrade timed out"); + tracing::debug!("inbound stream upgrade timed out"); continue; } } @@ -494,7 +495,7 @@ fn compute_new_shutdown( /// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work. 
fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { while start.checked_add(duration).is_none() { - log::debug!("{start:?} + {duration:?} cannot be presented, halving duration"); + tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration"); duration /= 2; } @@ -541,7 +542,7 @@ impl StreamUpgrade { { let effective_version = match version_override { Some(version_override) if version_override != upgrade::Version::default() => { - log::debug!( + tracing::debug!( "Substream upgrade protocol override: {:?} -> {:?}", upgrade::Version::default(), version_override @@ -753,11 +754,14 @@ mod tests { use quickcheck::*; use std::sync::{Arc, Weak}; use std::time::Instant; + use tracing_subscriber::EnvFilter; use void::Void; #[test] fn max_negotiating_inbound_streams() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(max_negotiating_inbound_streams: u8) { let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); @@ -924,7 +928,9 @@ mod tests { #[test] fn checked_add_fraction_can_add_u64_max() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); let start = Instant::now(); let duration = checked_add_fraction(start, Duration::from_secs(u64::MAX)); @@ -934,7 +940,9 @@ mod tests { #[test] fn compute_new_shutdown_does_not_panic() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); #[derive(Debug)] struct ArbitraryShutdown(Shutdown); diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index 8a2f1cb6b20..cfa3fb7ea3c 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -49,6 +49,7 @@ use std::{ task::Context, task::Poll, }; +use tracing::Instrument; use void::Void; mod concurrent_dial; @@ -425,20 +426,22 @@ where dial_concurrency_factor_override: Option, connection_id: ConnectionId, ) { - let dial = ConcurrentDial::new( - dials, - dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor), - ); + let concurrency_factor = + dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor); + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_outgoing_connection", %concurrency_factor, num_dials=%dials.len(), id = %connection_id); + span.follows_from(tracing::Span::current()); let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_outgoing_connection( + self.executor.spawn( + task::new_for_pending_outgoing_connection( connection_id, - dial, + ConcurrentDial::new(dials, concurrency_factor), abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); let endpoint = PendingPoint::Dialer { role_override }; @@ -468,13 +471,18 @@ where let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_incoming_connection( + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_incoming_connection", remote_addr = %info.send_back_addr, id = %connection_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_pending_incoming_connection( connection_id, future, abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); 
self.counters.inc_pending_incoming(); self.pending.insert( @@ -497,7 +505,6 @@ where handler: THandler, ) { let connection = connection.extract(); - let conns = self.established.entry(obtained_peer_id).or_default(); self.counters.inc_established(endpoint); @@ -524,16 +531,23 @@ where self.idle_connection_timeout, ); - self.executor.spawn(task::new_for_established_connection( - id, - obtained_peer_id, - connection, - command_receiver, - event_sender, - )) + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_established_connection", remote_addr = %endpoint.get_remote_address(), %id, peer = %obtained_peer_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_established_connection( + id, + obtained_peer_id, + connection, + command_receiver, + event_sender, + ) + .instrument(span), + ) } /// Polls the connection pool for events. + #[tracing::instrument(level = "debug", name = "Pool::poll", skip(self, cx))] pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> where THandler: ConnectionHandler + 'static, @@ -685,10 +699,10 @@ where if let Err(error) = check_peer_id() { self.executor.spawn(poll_fn(move |cx| { if let Err(e) = ready!(muxer.poll_close_unpin(cx)) { - log::debug!( - "Failed to close connection {:?} to peer {}: {:?}", - id, - obtained_peer_id, + tracing::debug!( + peer=%obtained_peer_id, + connection=%id, + "Failed to close connection to peer: {:?}", e ); } diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index 41e0cf42df9..89d4d36fadc 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -161,7 +161,7 @@ where }, )); } else { - log::error!("FullyNegotiatedOutbound: no handler for key") + tracing::error!("FullyNegotiatedOutbound: no handler for key") } } ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -178,7 +178,7 @@ where )); } } else { - log::error!("FullyNegotiatedInbound: no handler for key") + tracing::error!("FullyNegotiatedInbound: no handler for key") } } ConnectionEvent::AddressChange(AddressChange { new_address }) => { @@ -198,7 +198,7 @@ where error, })); } else { - log::error!("DialUpgradeError: no handler for protocol") + tracing::error!("DialUpgradeError: no handler for protocol") } } ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { @@ -225,7 +225,7 @@ where if let Some(h) = self.handlers.get_mut(&key) { h.on_behaviour_event(event) } else { - log::error!("on_behaviour_event: no handler for key") + tracing::error!("on_behaviour_event: no handler for key") } } diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 228c8281a70..7bbc1c68924 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -151,6 +151,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; +use tracing::Instrument; /// Event generated by the [`NetworkBehaviour`] that the swarm will report back. 
type TBehaviourOutEvent = ::ToSwarm; @@ -364,7 +365,7 @@ where local_peer_id: PeerId, config: Config, ) -> Self { - log::info!("Local peer id: {local_peer_id}"); + tracing::info!(%local_peer_id); Swarm { local_peer_id, @@ -482,7 +483,11 @@ where let num_addresses = addresses.len(); if num_addresses > 0 { - log::debug!("discarding {num_addresses} addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection {connection_id:?}") + tracing::debug!( + connection=%connection_id, + discarded_addresses_count=%num_addresses, + "discarding addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection" + ) } } } @@ -524,13 +529,22 @@ where .into_iter() .map(|a| match p2p_addr(peer_id, a) { Ok(address) => { - let dial = match dial_opts.role_override() { - Endpoint::Dialer => self.transport.dial(address.clone()), - Endpoint::Listener => self.transport.dial_as_listener(address.clone()), + let (dial, span) = match dial_opts.role_override() { + Endpoint::Dialer => ( + self.transport.dial(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial", %address), + ), + Endpoint::Listener => ( + self.transport.dial_as_listener(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial_as_listener", %address), + ), }; + span.follows_from(tracing::Span::current()); + match dial { Ok(fut) => fut .map(|r| (address, r.map_err(TransportError::Other))) + .instrument(span) .boxed(), Err(err) => futures::future::ready((address, Err(err))).boxed(), } @@ -769,11 +783,11 @@ where self.pool .spawn_connection(id, peer_id, &endpoint, connection, handler); - log::debug!( - "Connection established: {:?} {:?}; Total (peer): {}.", - peer_id, - endpoint, - num_established, + tracing::debug!( + peer=%peer_id, + ?endpoint, + total_peers=%num_established, + "Connection established" ); let failed_addresses = concurrent_dial_errors .as_ref() @@ -820,9 +834,9 @@ where })); if let Some(peer) = peer { - log::debug!("Connection attempt to {:?} failed with {:?}.", peer, error,); + tracing::debug!(%peer, "Connection attempt to peer failed with {:?}.", error,); } else { - log::debug!("Connection attempt to unknown peer failed with {:?}", error); + tracing::debug!("Connection attempt to unknown peer failed with {:?}", error); } self.pending_swarm_events @@ -840,7 +854,7 @@ where } => { let error = error.into(); - log::debug!("Incoming connection failed: {:?}", error); + tracing::debug!("Incoming connection failed: {:?}", error); self.behaviour .on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr: &local_addr, @@ -864,17 +878,17 @@ where .. 
} => { if let Some(error) = error.as_ref() { - log::debug!( - "Connection closed with error {:?}: {:?}; Total (peer): {}.", + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed with error {:?}: {:?}", error, connected, - remaining_established_connection_ids.len() ); } else { - log::debug!( - "Connection closed: {:?}; Total (peer): {}.", - connected, - remaining_established_connection_ids.len() + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed: {:?}", + connected ); } let peer_id = connected.peer_id; @@ -983,7 +997,11 @@ where listener_id, listen_addr, } => { - log::debug!("Listener {:?}; New address: {:?}", listener_id, listen_addr); + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "New listener address" + ); let addrs = self.listened_addrs.entry(listener_id).or_default(); if !addrs.contains(&listen_addr) { addrs.push(listen_addr.clone()) @@ -1003,10 +1021,10 @@ where listener_id, listen_addr, } => { - log::debug!( - "Listener {:?}; Expired address {:?}.", - listener_id, - listen_addr + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "Expired listener address" ); if let Some(addrs) = self.listened_addrs.get_mut(&listener_id) { addrs.retain(|a| a != &listen_addr); @@ -1026,7 +1044,11 @@ where listener_id, reason, } => { - log::debug!("Listener {:?}; Closed by {:?}.", listener_id, reason); + tracing::debug!( + listener=?listener_id, + ?reason, + "Listener closed" + ); let addrs = self.listened_addrs.remove(&listener_id).unwrap_or_default(); for addr in addrs.iter() { self.behaviour.on_swarm_event(FromSwarm::ExpiredListenAddr( @@ -1167,6 +1189,7 @@ where /// Internal function used by everything event-related. /// /// Polls the `Swarm` for the next event. + #[tracing::instrument(level = "debug", name = "Swarm::poll", skip(self, cx))] fn poll_next_event( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -1232,7 +1255,7 @@ where this.handle_pool_event(pool_event); continue; } - }; + } // Poll the listener(s) for new connections. 
match Pin::new(&mut this.transport).poll(cx) { @@ -2294,7 +2317,9 @@ mod tests { #[tokio::test] async fn aborting_pending_connection_surfaces_error() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); let mut dialer = new_test_swarm(Config::with_tokio_executor()); let mut listener = new_test_swarm(Config::with_tokio_executor()); diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 6d217d8be31..df769161c55 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -11,21 +11,21 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] +async-std-resolver = { version = "0.23", optional = true } async-trait = "0.1.74" +futures = "0.3.28" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" -futures = "0.3.29" -async-std-resolver = { version = "0.23", optional = true } parking_lot = "0.12.0" -trust-dns-resolver = { version = "0.23", default-features = false, features = ["system-config"] } smallvec = "1.11.1" +tracing = "0.1.37" +trust-dns-resolver = { version = "0.23", default-features = false, features = ["system-config"] } [dev-dependencies] -env_logger = "0.10" libp2p-identity = { workspace = true, features = ["rand"] } tokio-crate = { package = "tokio", version = "1.0", default-features = false, features = ["rt", "time"] } async-std-crate = { package = "async-std", version = "1.6" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] async-std = ["async-std-resolver"] diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index a68a2c53d2d..13ad93952c9 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -277,7 +277,7 @@ where ) }) { if dns_lookups == MAX_DNS_LOOKUPS { - log::debug!("Too many DNS lookups. Dropping unresolved {}.", addr); + tracing::debug!(address=%addr, "Too many DNS lookups, dropping unresolved address"); last_err = Some(Error::TooManyLookups); // There may still be fully resolved addresses in `unresolved`, // so keep going until `unresolved` is empty. @@ -294,13 +294,13 @@ where last_err = Some(e); } Ok(Resolved::One(ip)) => { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); } Ok(Resolved::Many(ips)) => { for ip in ips { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); @@ -314,14 +314,14 @@ where if a.ends_with(&suffix) { if n < MAX_TXT_RECORDS { n += 1; - log::trace!("Resolved {} -> {}", name, a); + tracing::trace!(protocol=%name, resolved=%a); let addr = prefix.iter().chain(a.iter()).collect::(); unresolved.push(addr); } else { - log::debug!( - "Too many TXT records. Dropping resolved {}.", - a + tracing::debug!( + resolved=%a, + "Too many TXT records, dropping resolved" ); } } @@ -330,7 +330,7 @@ where } } else { // We have a fully resolved address, so try to dial it. 
- log::debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); let transport = inner.clone(); let dial = match role_override { @@ -354,12 +354,12 @@ where match result { Ok(out) => return Ok(out), Err(err) => { - log::debug!("Dial error: {:?}.", err); + tracing::debug!("Dial error: {:?}.", err); if unresolved.is_empty() { return Err(err); } if dial_attempts == MAX_DIAL_ATTEMPTS { - log::debug!( + tracing::debug!( "Aborting dialing after {} attempts.", MAX_DIAL_ATTEMPTS ); @@ -537,7 +537,7 @@ fn resolve<'a, E: 'a + Send, R: Resolver>( match parse_dnsaddr_txt(chars) { Err(e) => { // Skip over seemingly invalid entries. - log::debug!("Invalid TXT record: {:?}", e); + tracing::debug!("Invalid TXT record: {:?}", e); } Ok(a) => { addrs.push(a); @@ -612,7 +612,9 @@ mod tests { #[test] fn basic_resolve() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); #[derive(Clone)] struct CustomTransport; diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index c3d5a779b00..502cfdd99d9 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -15,7 +15,6 @@ curve25519-dalek = "4.1.1" futures = "0.3.29" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519"] } -log = "0.4" multiaddr = { workspace = true } multihash = { workspace = true } once_cell = "1.18.0" @@ -24,6 +23,7 @@ rand = "0.8.3" sha2 = "0.10.8" static_assertions = "1" thiserror = "1.0.50" +tracing = "0.1.37" x25519-dalek = "2" zeroize = "1" @@ -34,9 +34,9 @@ snow = { version = "0.9.2", features = ["ring-resolver"], default-features = fal snow = { version = "0.9.2", features = ["default-resolver"], default-features = false } [dev-dependencies] -env_logger = "0.10.0" futures_ringbuf = "0.4.0" quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } libp2p-identity = { workspace = true, features = ["rand"] } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/transports/noise/src/io.rs b/transports/noise/src/io.rs index c43e1dd67a1..9cd4cfed52a 100644 --- a/transports/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -27,7 +27,6 @@ use bytes::Bytes; use framed::{Codec, MAX_FRAME_LEN}; use futures::prelude::*; use futures::ready; -use log::trace; use std::{ cmp::min, fmt, io, @@ -76,10 +75,10 @@ impl AsyncRead for Output { if len > 0 { let n = min(len - off, buf.len()); buf[..n].copy_from_slice(&self.recv_buffer[off..off + n]); - trace!("read: copied {}/{} bytes", off + n, len); + tracing::trace!(copied_bytes=%(off + n), total_bytes=%len, "read: copied"); self.recv_offset += n; if len == self.recv_offset { - trace!("read: frame consumed"); + tracing::trace!("read: frame consumed"); // Drop the existing view so `NoiseFramed` can reuse // the buffer when polling for the next frame below. self.recv_buffer = Bytes::new(); @@ -112,7 +111,7 @@ impl AsyncWrite for Output { // The MAX_FRAME_LEN is the maximum buffer size before a frame must be sent. 
if this.send_offset == MAX_FRAME_LEN { - trace!("write: sending {} bytes", MAX_FRAME_LEN); + tracing::trace!(bytes=%MAX_FRAME_LEN, "write: sending"); ready!(io.as_mut().poll_ready(cx))?; io.as_mut().start_send(frame_buf)?; this.send_offset = 0; @@ -124,7 +123,7 @@ impl AsyncWrite for Output { let n = min(MAX_FRAME_LEN - off, buf.len()); this.send_buffer[off..off + n].copy_from_slice(&buf[..n]); this.send_offset += n; - trace!("write: buffered {} bytes", this.send_offset); + tracing::trace!(bytes=%this.send_offset, "write: buffered"); Poll::Ready(Ok(n)) } @@ -137,7 +136,7 @@ impl AsyncWrite for Output { // Check if there is still one more frame to send. if this.send_offset > 0 { ready!(io.as_mut().poll_ready(cx))?; - trace!("flush: sending {} bytes", this.send_offset); + tracing::trace!(bytes= %this.send_offset, "flush: sending"); io.as_mut().start_send(frame_buf)?; this.send_offset = 0; } diff --git a/transports/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs index 739b0eea426..b7504f2e37a 100644 --- a/transports/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -177,12 +177,12 @@ fn encrypt( encrypt_buffer: &mut BytesMut, encrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result, ) -> io::Result<()> { - log::trace!("Encrypting {} bytes", cleartext.len()); + tracing::trace!("Encrypting {} bytes", cleartext.len()); encrypt_buffer.resize(cleartext.len() + EXTRA_ENCRYPT_SPACE, 0); let n = encrypt_fn(cleartext, encrypt_buffer).map_err(into_io_error)?; - log::trace!("Outgoing ciphertext has {n} bytes"); + tracing::trace!("Outgoing ciphertext has {n} bytes"); encode_length_prefixed(&encrypt_buffer[..n], dst); @@ -202,12 +202,12 @@ fn decrypt( None => return Ok(None), }; - log::trace!("Incoming ciphertext has {} bytes", ciphertext.len()); + tracing::trace!("Incoming ciphertext has {} bytes", ciphertext.len()); let mut decrypt_buffer = BytesMut::zeroed(ciphertext.len()); let n = decrypt_fn(&ciphertext, &mut decrypt_buffer).map_err(into_io_error)?; - log::trace!("Decrypted cleartext has {n} bytes"); + tracing::trace!("Decrypted cleartext has {n} bytes"); Ok(Some(decrypt_buffer.split_to(n).freeze())) } diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index ffcf7934ac0..0afebc0cbea 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -24,9 +24,9 @@ use libp2p_core::upgrade; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; -use log::info; use quickcheck::*; use std::{convert::TryInto, io}; +use tracing_subscriber::EnvFilter; #[allow(dead_code)] fn core_upgrade_compat() { @@ -41,7 +41,9 @@ fn core_upgrade_compat() { #[test] fn xx() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(mut messages: Vec) -> bool { messages.truncate(5); let server_id = identity::Keypair::generate_ed25519(); @@ -86,7 +88,7 @@ fn xx() { Err(e) => panic!("error reading len: {e}"), } }; - info!("server: reading message ({} bytes)", len); + tracing::info!(bytes=%len, "server: reading message"); let mut server_buffer = vec![0; len.try_into().unwrap()]; server_session .read_exact(&mut server_buffer) diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index 33c2eeab7ae..a64f6ce8e9f 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -16,16 +16,16 @@ bytes = "1" futures = "0.3.29" libp2p-core = { workspace = 
true } libp2p-identity = { workspace = true } -log = "0.4.20" quick-protobuf = "0.8" +tracing = "0.1.37" quick-protobuf-codec = { workspace = true } [dev-dependencies] -env_logger = "0.10.0" libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } quickcheck = { workspace = true } rand = "0.8" futures_ringbuf = "0.4.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index 51dd5501ea0..ddd5f7f8a9b 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -25,7 +25,6 @@ use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; -use log::{debug, trace}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> @@ -35,7 +34,7 @@ where // The handshake messages all start with a variable-length integer indicating the size. let mut framed_socket = Framed::new(socket, quick_protobuf_codec::Codec::::new(100)); - trace!("sending exchange to remote"); + tracing::trace!("sending exchange to remote"); framed_socket .send(Exchange { id: Some(config.local_public_key.to_peer_id().to_bytes()), @@ -44,7 +43,7 @@ where .await .map_err(DecodeError)?; - trace!("receiving the remote's exchange"); + tracing::trace!("receiving the remote's exchange"); let public_key = match framed_socket .next() .await @@ -62,13 +61,13 @@ where public_key } None => { - debug!("unexpected eof while waiting for remote's exchange"); + tracing::debug!("unexpected eof while waiting for remote's exchange"); let err = IoError::new(IoErrorKind::BrokenPipe, "unexpected eof"); return Err(err.into()); } }; - trace!("received exchange from remote; pubkey = {:?}", public_key); + tracing::trace!(?public_key, "received exchange from remote"); let FramedParts { io, diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index bdca271a68e..4a322d63fab 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -32,7 +32,6 @@ use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_identity::PublicKey; -use log::debug; use std::{ io, iter, pin::Pin, @@ -101,9 +100,9 @@ impl Config { where T: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - debug!("Starting plaintext handshake."); + tracing::debug!("Starting plaintext handshake."); let (socket, remote_key, read_buffer) = handshake::handshake(socket, self).await?; - debug!("Finished plaintext handshake."); + tracing::debug!("Finished plaintext handshake."); Ok(( remote_key.to_peer_id(), diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index fd3350fb5aa..f77f23d3ad3 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -22,12 +22,14 @@ use futures::io::{AsyncReadExt, AsyncWriteExt}; use libp2p_core::upgrade::InboundConnectionUpgrade; use libp2p_identity as identity; use libp2p_plaintext as plaintext; -use log::debug; use quickcheck::QuickCheck; +use tracing_subscriber::EnvFilter; #[test] fn variable_msg_length() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn 
prop(msg: Vec) { let msg_to_send = msg.clone(); @@ -53,18 +55,18 @@ fn variable_msg_length() { assert_eq!(received_client_id, client_id.public().to_peer_id()); let client_fut = async { - debug!("Client: writing message."); + tracing::debug!("Client: writing message."); client_channel .write_all(&msg_to_send) .await .expect("no error"); - debug!("Client: flushing channel."); + tracing::debug!("Client: flushing channel."); client_channel.flush().await.expect("no error"); }; let server_fut = async { let mut server_buffer = vec![0; msg_to_receive.len()]; - debug!("Server: reading message."); + tracing::debug!("Server: reading message."); server_channel .read_exact(&mut server_buffer) .await diff --git a/transports/pnet/Cargo.toml b/transports/pnet/Cargo.toml index 16e34a4a9ee..000cf0eb203 100644 --- a/transports/pnet/Cargo.toml +++ b/transports/pnet/Cargo.toml @@ -12,9 +12,9 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.29" -log = "0.4.20" salsa20 = "0.10" sha3 = "0.10" +tracing = "0.1.37" rand = "0.8" pin-project = "1.1.3" diff --git a/transports/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs index c5993548239..06f932fbe71 100644 --- a/transports/pnet/src/crypt_writer.rs +++ b/transports/pnet/src/crypt_writer.rs @@ -23,7 +23,6 @@ use futures::{ ready, task::{Context, Poll}, }; -use log::trace; use pin_project::pin_project; use salsa20::{cipher::StreamCipher, XSalsa20}; use std::{fmt, pin::Pin}; @@ -120,7 +119,7 @@ impl AsyncWrite for CryptWriter { let res = Pin::new(&mut *this.buf).poll_write(cx, buf); if let Poll::Ready(Ok(count)) = res { this.cipher.apply_keystream(&mut this.buf[0..count]); - trace!("encrypted {} bytes", count); + tracing::trace!(bytes=%count, "encrypted bytes"); } else { debug_assert!(false); }; diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index d8aac22eecd..083ffff36a3 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -29,7 +29,6 @@ mod crypt_writer; use crypt_writer::CryptWriter; use futures::prelude::*; -use log::trace; use pin_project::pin_project; use rand::RngCore; use salsa20::{ @@ -210,7 +209,7 @@ impl PnetConfig { where TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - trace!("exchanging nonces"); + tracing::trace!("exchanging nonces"); let mut local_nonce = [0u8; NONCE_SIZE]; let mut remote_nonce = [0u8; NONCE_SIZE]; rand::thread_rng().fill_bytes(&mut local_nonce); @@ -223,7 +222,7 @@ impl PnetConfig { .read_exact(&mut remote_nonce) .await .map_err(PnetError::HandshakeError)?; - trace!("setting up ciphers"); + tracing::trace!("setting up ciphers"); let write_cipher = XSalsa20::new(&self.key.0.into(), &local_nonce.into()); let read_cipher = XSalsa20::new(&self.key.0.into(), &remote_nonce.into()); Ok(PnetOutput::new(socket, write_cipher, read_cipher)) @@ -257,9 +256,9 @@ impl AsyncRead for PnetOutput { let this = self.project(); let result = this.inner.get_pin_mut().poll_read(cx, buf); if let Poll::Ready(Ok(size)) = &result { - trace!("read {} bytes", size); + tracing::trace!(bytes=%size, "read bytes"); this.read_cipher.apply_keystream(&mut buf[..*size]); - trace!("decrypted {} bytes", size); + tracing::trace!(bytes=%size, "decrypted bytes"); } result } diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index afee991c76a..4ce23bf1207 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -17,13 +17,13 @@ if-watch = "3.1.0" libp2p-core = { workspace = true } libp2p-tls = { workspace = true } libp2p-identity = { 
workspace = true } -log = "0.4" parking_lot = "0.12.0" quinn = { version = "0.10.2", default-features = false, features = ["tls-rustls", "futures-io"] } rand = "0.8.5" rustls = { version = "0.21.8", default-features = false } thiserror = "1.0.50" tokio = { version = "1.33.0", default-features = false, features = ["net", "rt", "time"], optional = true } +tracing = "0.1.37" socket2 = "0.5.5" ring = "0.16.20" @@ -40,7 +40,6 @@ rustc-args = ["--cfg", "docsrs"] [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -env_logger = "0.10.0" libp2p-identity = { workspace = true, features = ["rand"] } libp2p-muxer-test-harness = { path = "../../muxers/test-harness" } libp2p-noise = { workspace = true } @@ -48,6 +47,7 @@ libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } quickcheck = "1" tokio = { version = "1.33.0", features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "stream_compliance" diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs index 41f55c5cada..605799af5e1 100644 --- a/transports/quic/src/hole_punching.rs +++ b/transports/quic/src/hole_punching.rs @@ -34,7 +34,7 @@ async fn punch_holes( .take(64) .collect(); - log::trace!("Sending random UDP packet to {remote_addr}"); + tracing::trace!("Sending random UDP packet to {remote_addr}"); P::send_to(&socket, &contents, remote_addr).await?; diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index 24527649edf..feda501464f 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -318,7 +318,7 @@ impl Transport for GenTransport
<P>
{ .try_clone_socket() .map_err(Self::Error::from)?; - log::debug!("Preparing for hole-punch from {addr}"); + tracing::debug!("Preparing for hole-punch from {addr}"); let hole_puncher = hole_puncher::
<P>
(socket, socket_addr, self.handshake_timeout); @@ -348,7 +348,12 @@ impl Transport for GenTransport
<P>
{ .expect("hole punch connection sender is never dropped before receiver") .await?; if inbound_peer_id != peer_id { - log::warn!("expected inbound connection from {socket_addr} to resolve to {peer_id} but got {inbound_peer_id}"); + tracing::warn!( + peer=%peer_id, + inbound_peer=%inbound_peer_id, + socket_address=%socket_addr, + "expected inbound connection from socket_address to resolve to peer but got inbound peer" + ); } Ok((inbound_peer_id, connection)) } @@ -527,7 +532,10 @@ impl Listener
<P>
{ if let Some(listen_addr) = ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) { - log::debug!("New listen address: {listen_addr}"); + tracing::debug!( + address=%listen_addr, + "New listen address" + ); self.listening_addresses.insert(inet.addr()); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, @@ -539,7 +547,10 @@ impl Listener
<P>
{ if let Some(listen_addr) = ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) { - log::debug!("Expired listen address: {listen_addr}"); + tracing::debug!( + address=%listen_addr, + "Expired listen address" + ); self.listening_addresses.remove(&inet.addr()); return Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index f72a6494e64..77dfac6bb44 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -26,6 +26,7 @@ use std::{ pin::Pin, sync::{Arc, Mutex}, }; +use tracing_subscriber::EnvFilter; #[cfg(feature = "tokio")] #[tokio::test] @@ -42,7 +43,9 @@ async fn async_std_smoke() { #[cfg(feature = "tokio")] #[tokio::test] async fn endpoint_reuse() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_default_transport::(); let (_, mut b_transport) = create_default_transport::(); @@ -67,7 +70,9 @@ async fn endpoint_reuse() { #[cfg(feature = "async-std")] #[async_std::test] async fn ipv4_dial_ipv6() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::(); let (b_peer_id, mut b_transport) = create_default_transport::(); @@ -85,7 +90,9 @@ async fn ipv4_dial_ipv6() { #[cfg(feature = "async-std")] #[async_std::test] async fn wrapped_with_delay() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); struct DialDelay(Arc>>); @@ -253,7 +260,9 @@ async fn tcp_and_quic() { #[cfg(feature = "async-std")] #[test] fn concurrent_connections_and_streams_async_std() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); quickcheck::QuickCheck::new() .min_tests_passed(1) @@ -264,7 +273,9 @@ fn concurrent_connections_and_streams_async_std() { #[cfg(feature = "tokio")] #[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -281,7 +292,9 @@ async fn draft_29_support() { use futures::{future::poll_fn, select}; use libp2p_core::transport::TransportError; - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_transport::(|cfg| cfg.support_draft_29 = true); @@ -342,7 +355,9 @@ async fn draft_29_support() { #[cfg(feature = "async-std")] #[async_std::test] async fn backpressure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let max_stream_data = quic::Config::new(&generate_tls_keypair()).max_stream_data; let (mut stream_a, mut stream_b) = build_streams::().await; @@ -366,7 +381,9 @@ async fn backpressure() { #[cfg(feature = "async-std")] #[async_std::test] async fn read_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (mut stream_a, mut stream_b) = build_streams::().await; let data = vec![0; 10]; @@ -386,7 +403,9 @@ async fn 
read_after_peer_dropped_stream() { #[async_std::test] #[should_panic] async fn write_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (stream_a, mut stream_b) = build_streams::().await; drop(stream_a); futures_timer::Delay::new(Duration::from_millis(1)).await; @@ -440,7 +459,9 @@ async fn test_local_listener_reuse() { } async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::
<P>
(); let (b_peer_id, mut b_transport) = create_default_transport::
<P>
(); @@ -562,7 +583,11 @@ fn prop( let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. for _ in 0..number_listeners { @@ -703,7 +728,10 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!( + stream_count=%number_streams, + "Created streams" + ); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index cb54c98e7ba..37e85d04ded 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -18,9 +18,9 @@ if-watch = "3.1.0" libc = "0.2.149" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" socket2 = { version = "0.5.5", features = ["all"] } tokio = { version = "1.33.0", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [features] tokio = ["dep:tokio", "if-watch/tokio"] @@ -30,7 +30,7 @@ async-io = ["dep:async-io", "if-watch/smol"] async-std = { version = "1.6.5", features = ["attributes"] } libp2p-identity = { workspace = true, features = ["rand"] } tokio = { version = "1.33.0", default-features = false, features = ["full"] } -env_logger = "0.10.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 5efdf16fff5..b466f387ba4 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -98,7 +98,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn register(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Registering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Registering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -111,7 +111,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn unregister(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Unregistering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Unregistering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -446,7 +446,7 @@ where } else { return Err(TransportError::MultiaddrNotSupported(addr)); }; - log::debug!("listening on {}", socket_addr); + tracing::debug!(address=%socket_addr, "listening on address"); let listener = self .do_listen(id, socket_addr) .map_err(TransportError::Other)?; @@ -472,14 +472,14 @@ where } else { return Err(TransportError::MultiaddrNotSupported(addr)); }; - log::debug!("dialing {}", socket_addr); + tracing::debug!(address=%socket_addr, "dialing address"); let socket = self .create_socket(socket_addr) .map_err(TransportError::Other)?; if let Some(addr) = self.port_reuse.local_dial_addr(&socket_addr.ip()) { - log::trace!("Binding dial socket to listen socket {}", addr); + tracing::trace!(address=%addr, "Binding dial socket to listen socket address"); socket.bind(&addr.into()).map_err(TransportError::Other)?; } @@ -538,6 +538,7 @@ where } /// Poll all listeners. 
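// Illustrative sketch (not part of this patch): the tcp hunk continuing below adds a
// #[tracing::instrument] attribute to the transport's poll function, so every event
// emitted while polling is grouped under a named span. The same mechanism on a
// hypothetical function (dial_one and its parameter are made up for illustration):
#[tracing::instrument(level = "trace", name = "Transport::dial_one", skip(addr))]
fn dial_one(addr: &str) {
    // skip(addr) keeps the argument out of the span's own fields; the event below
    // records it explicitly. Relies on tracing's default "attributes" feature.
    tracing::trace!(%addr, "dialing");
}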
+ #[tracing::instrument(level = "trace", name = "Transport::poll", skip(self, cx))] fn poll( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -677,7 +678,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("New listen address: {}", ma); + tracing::debug!(address=%ma, "New listen address"); self.port_reuse.register(ip, my_listen_addr_port); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, @@ -689,7 +690,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("Expired listen address: {}", ma); + tracing::debug!(address=%ma, "Expired listen address"); self.port_reuse.unregister(ip, my_listen_addr_port); return Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, @@ -762,7 +763,11 @@ where let local_addr = ip_to_multiaddr(local_addr.ip(), local_addr.port()); let remote_addr = ip_to_multiaddr(remote_addr.ip(), remote_addr.port()); - log::debug!("Incoming connection from {} at {}", remote_addr, local_addr); + tracing::debug!( + remote_address=%remote_addr, + local_address=%local_addr, + "Incoming connection from remote at local" + ); return Poll::Ready(Some(TransportEvent::Incoming { listener_id: self.listener_id, @@ -900,7 +905,9 @@ mod tests { #[test] fn communicating_between_dialer_and_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -969,7 +976,9 @@ mod tests { #[test] fn wildcard_expansion() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -1038,7 +1047,9 @@ mod tests { #[test] fn port_reuse_dialing() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener( addr: Multiaddr, @@ -1145,7 +1156,9 @@ mod tests { #[test] fn port_reuse_listening() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen_twice(addr: Multiaddr) { let mut tcp = Transport::::new(Config::new().port_reuse(true)); @@ -1199,7 +1212,9 @@ mod tests { #[test] fn listen_port_0() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen(addr: Multiaddr) -> Multiaddr { let mut tcp = Transport::::default().boxed(); @@ -1234,7 +1249,9 @@ mod tests { #[test] fn listen_invalid_addr() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); fn test(addr: Multiaddr) { #[cfg(feature = "async-io")] @@ -1304,7 +1321,9 @@ mod tests { #[test] fn test_remove_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn cycle_listeners() -> bool { let mut tcp = Transport::::default().boxed(); diff --git 
a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index f27f0647681..9d480fd1dbe 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -13,9 +13,9 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-std = { version = "1.6.2", optional = true } libp2p-core = { workspace = true } -log = "0.4.20" futures = "0.3.29" tokio = { version = "1.33", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [dev-dependencies] tempfile = "3.8" diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 3cd71552d18..075cbadb80a 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -49,7 +49,6 @@ use libp2p_core::{ transport::{TransportError, TransportEvent}, Transport, }; -use log::debug; use std::collections::VecDeque; use std::pin::Pin; use std::task::{Context, Poll}; @@ -104,7 +103,7 @@ macro_rules! codegen { stream::once({ let addr = addr.clone(); async move { - debug!("Now listening on {}", addr); + tracing::debug!(address=%addr, "Now listening on address"); Ok(TransportEvent::NewAddress { listener_id: id, listen_addr: addr, @@ -118,7 +117,7 @@ macro_rules! codegen { async move { let event = match listener.accept().await { Ok((stream, _)) => { - debug!("incoming connection on {}", addr); + tracing::debug!(address=%addr, "incoming connection on address"); TransportEvent::Incoming { upgrade: future::ok(stream), local_addr: addr.clone(), @@ -163,7 +162,7 @@ macro_rules! codegen { fn dial(&mut self, addr: Multiaddr) -> Result> { // TODO: Should we dial at all? if let Ok(path) = multiaddr_to_path(&addr) { - debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); Ok(async move { <$unix_stream>::connect(&path).await }.boxed()) } else { Err(TransportError::MultiaddrNotSupported(addr)) diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 847e54abbd5..3e2659c71c8 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -22,10 +22,10 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } libp2p-webrtc-utils = { workspace = true } -log = "0.4.19" send_wrapper = { version = "0.6.0", features = ["futures"] } serde = { version = "1.0", features = ["derive"] } thiserror = "1" +tracing = "0.1.37" wasm-bindgen = { version = "0.2.87" } wasm-bindgen-futures = { version = "0.4.37" } web-sys = { version = "0.3.64", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs index b026aec0b40..d0c8968f62e 100644 --- a/transports/webrtc-websys/src/connection.rs +++ b/transports/webrtc-websys/src/connection.rs @@ -47,16 +47,16 @@ impl Connection { let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { - log::trace!("New data channel"); + tracing::trace!("New data channel"); if let Err(e) = tx_ondatachannel.try_send(ev.channel()) { if e.is_full() { - log::warn!("Remote is opening too many data channels, we can't keep up!"); + tracing::warn!("Remote is 
opening too many data channels, we can't keep up!"); return; } if e.is_disconnected() { - log::warn!("Receiver is gone, are we shutting down?"); + tracing::warn!("Receiver is gone, are we shutting down?"); } } }); @@ -90,7 +90,7 @@ impl Connection { /// if they are used. fn close_connection(&mut self) { if !self.closed { - log::trace!("connection::close_connection"); + tracing::trace!("connection::close_connection"); self.inner.inner.close(); self.closed = true; } @@ -121,7 +121,7 @@ impl StreamMuxer for Connection { } None => { // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. - log::debug!("`Sender` for inbound data channels has been dropped"); + tracing::debug!("`Sender` for inbound data channels has been dropped"); Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) } } @@ -131,7 +131,7 @@ impl StreamMuxer for Connection { mut self: Pin<&mut Self>, _: &mut Context<'_>, ) -> Poll> { - log::trace!("Creating outbound data channel"); + tracing::trace!("Creating outbound data channel"); let data_channel = self.inner.new_regular_data_channel(); let stream = self.new_stream_from_data_channel(data_channel); @@ -144,7 +144,7 @@ impl StreamMuxer for Connection { mut self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll> { - log::trace!("connection::poll_close"); + tracing::trace!("connection::poll_close"); self.close_connection(); Poll::Ready(Ok(())) @@ -158,7 +158,7 @@ impl StreamMuxer for Connection { match ready!(self.drop_listeners.poll_next_unpin(cx)) { Some(Ok(())) => {} Some(Err(e)) => { - log::debug!("a DropListener failed: {e}") + tracing::debug!("a DropListener failed: {e}") } None => { self.no_drop_listeners_waker = Some(cx.waker().clone()); diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs index 6f50262b988..439182ea4db 100644 --- a/transports/webrtc-websys/src/sdp.rs +++ b/transports/webrtc-websys/src/sdp.rs @@ -46,7 +46,7 @@ pub(crate) fn offer(offer: String, client_ufrag: &str) -> RtcSessionDescriptionI // remove any double \r\n let munged_sdp_offer = munged_sdp_offer.replace("\r\n\r\n", "\r\n"); - log::trace!("Created SDP offer: {munged_sdp_offer}"); + tracing::trace!(offer=%munged_sdp_offer, "Created SDP offer"); let mut offer_obj = RtcSessionDescriptionInit::new(RtcSdpType::Offer); offer_obj.sdp(&munged_sdp_offer); diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs index 9c9b19cdb32..0ee4f7920c9 100644 --- a/transports/webrtc-websys/src/stream/poll_data_channel.rs +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -53,7 +53,7 @@ impl PollDataChannel { let open_waker = open_waker.clone(); move |_: RtcDataChannelEvent| { - log::trace!("DataChannel opened"); + tracing::trace!("DataChannel opened"); open_waker.wake(); } }); @@ -65,7 +65,7 @@ impl PollDataChannel { let write_waker = write_waker.clone(); move |_: Event| { - log::trace!("DataChannel available for writing (again)"); + tracing::trace!("DataChannel available for writing (again)"); write_waker.wake(); } }); @@ -76,7 +76,7 @@ impl PollDataChannel { let close_waker = close_waker.clone(); move |_: Event| { - log::trace!("DataChannel closed"); + tracing::trace!("DataChannel closed"); close_waker.wake(); } }); @@ -98,7 +98,7 @@ impl PollDataChannel { if read_buffer.len() + data.length() as usize > MAX_MSG_LEN { overloaded.store(true, Ordering::SeqCst); - log::warn!("Remote is overloading 
us with messages, resetting stream",); + tracing::warn!("Remote is overloading us with messages, resetting stream",); return; } diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs index 092baed50c4..cc053835041 100644 --- a/transports/webrtc-websys/src/upgrade.rs +++ b/transports/webrtc-websys/src/upgrade.rs @@ -45,12 +45,12 @@ async fn outbound_inner( let local_fingerprint = rtc_peer_connection.local_fingerprint()?; - log::trace!("local_fingerprint: {:?}", local_fingerprint); - log::trace!("remote_fingerprint: {:?}", remote_fingerprint); + tracing::trace!(?local_fingerprint); + tracing::trace!(?remote_fingerprint); let peer_id = noise::outbound(id_keys, channel, remote_fingerprint, local_fingerprint).await?; - log::debug!("Remote peer identified as {peer_id}"); + tracing::debug!(peer=%peer_id, "Remote peer identified"); Ok((peer_id, Connection::new(rtc_peer_connection))) } diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 2379c299527..e8306f7a8ba 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -21,7 +21,6 @@ libp2p-core = { workspace = true } libp2p-noise = { workspace = true } libp2p-identity = { workspace = true } libp2p-webrtc-utils = { workspace = true } -log = "0.4" multihash = { workspace = true } rand = "0.8" rcgen = "0.11.3" @@ -31,6 +30,7 @@ thiserror = "1" tinytemplate = "1.2" tokio = { version = "1.33", features = ["net"], optional = true } tokio-util = { version = "0.7", features = ["compat"], optional = true } +tracing = "0.1.37" webrtc = { version = "0.9.0", optional = true } [features] @@ -38,10 +38,11 @@ tokio = ["dep:tokio", "dep:tokio-util", "dep:webrtc", "if-watch/tokio"] pem = ["webrtc?/pem"] [dev-dependencies] -env_logger = "0.10" libp2p-identity = { workspace = true, features = ["rand"] } tokio = { version = "1.33", features = ["full"] } quickcheck = "1.0.3" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + [[test]] name = "smoke" diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 29983d720b5..3bcc4c3193e 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -101,7 +101,7 @@ impl Connection { tx: Arc>>>, ) { rtc_conn.on_data_channel(Box::new(move |data_channel: Arc| { - log::debug!("Incoming data channel {}", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Incoming data channel"); let tx = tx.clone(); @@ -109,7 +109,7 @@ impl Connection { data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -118,7 +118,7 @@ impl Connection { Ok(detached) => { let mut tx = tx.lock().await; if let Err(e) = tx.try_send(detached.clone()) { - log::error!("Can't send data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {}", e); // We're not accepting data channels fast enough => // close this channel. // @@ -126,16 +126,16 @@ impl Connection { // during the negotiation process, but it's not // possible with the current API. 
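// Illustrative sketch (not part of this patch): the connection.rs hunk that continues
// directly below mixes structured fields with ordinary format arguments in one event.
// A minimal stand-alone version, with hypothetical id and error values:
fn report_send_failure(id: u16, e: std::io::Error) {
    // "channel" is captured as a structured field via Display (%); the error itself
    // still flows through the conventional format string.
    tracing::error!(channel = %id, "Can't send data channel: {}", e);
}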
if let Err(e) = detached.close().await { - log::error!( - "Failed to close data channel {}: {}", - id, + tracing::error!( + channel=%id, + "Failed to close data channel: {}", e ); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) @@ -156,7 +156,7 @@ impl StreamMuxer for Connection { ) -> Poll> { match ready!(self.incoming_data_channels_rx.poll_next_unpin(cx)) { Some(detached) => { - log::trace!("Incoming stream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Incoming stream"); let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); @@ -185,7 +185,7 @@ impl StreamMuxer for Connection { match ready!(self.drop_listeners.poll_next_unpin(cx)) { Some(Ok(())) => {} Some(Err(e)) => { - log::debug!("a DropListener failed: {e}") + tracing::debug!("a DropListener failed: {e}") } None => { self.no_drop_listeners_waker = Some(cx.waker().clone()); @@ -208,7 +208,7 @@ impl StreamMuxer for Connection { // No need to hold the lock during the DTLS handshake. drop(peer_conn); - log::trace!("Opening data channel {}", data_channel.id()); + tracing::trace!(channel=%data_channel.id(), "Opening data channel"); let (tx, rx) = oneshot::channel::>(); @@ -226,7 +226,7 @@ impl StreamMuxer for Connection { Ok(detached) => { self.outbound_fut = None; - log::trace!("Outbound stream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Outbound stream"); let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); @@ -244,7 +244,7 @@ impl StreamMuxer for Connection { } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - log::debug!("Closing connection"); + tracing::debug!("Closing connection"); let peer_conn = self.peer_conn.clone(); let fut = self.close_fut.get_or_insert(Box::pin(async move { @@ -275,7 +275,7 @@ pub(crate) async fn register_data_channel_open_handler( data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -283,14 +283,14 @@ pub(crate) async fn register_data_channel_open_handler( match data_channel.detach().await { Ok(detached) => { if let Err(e) = data_channel_tx.send(detached.clone()) { - log::error!("Can't send data channel {}: {:?}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {:?}", e); if let Err(e) = detached.close().await { - log::error!("Failed to close data channel {}: {}", id, e); + tracing::error!(channel=%id, "Failed to close data channel: {}", e); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index e49345a01b2..8549a864dcc 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ b/transports/webrtc/src/tokio/sdp.rs @@ -49,7 +49,7 @@ pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescripti client_ufrag, ); - log::trace!("Created SDP offer: {offer}"); + tracing::trace!(offer=%offer, "Created SDP offer"); RTCSessionDescription::offer(offer).unwrap() } diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index 4b3f15d5978..b50e44fe4ba 
100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -238,7 +238,7 @@ impl ListenStream { /// terminate the stream. fn close(&mut self, reason: Result<(), Error>) { match self.report_closed { - Some(_) => log::debug!("Listener was already closed."), + Some(_) => tracing::debug!("Listener was already closed"), None => { // Report the listener event as closed. let _ = self diff --git a/transports/webrtc/src/tokio/udp_mux.rs b/transports/webrtc/src/tokio/udp_mux.rs index f978121d01c..20e04edaf72 100644 --- a/transports/webrtc/src/tokio/udp_mux.rs +++ b/transports/webrtc/src/tokio/udp_mux.rs @@ -175,7 +175,7 @@ impl UDPMuxNewAddr { None } Err(e) => { - log::debug!("{} (addr={})", e, addr); + tracing::debug!(address=%addr, "{}", e); None } } @@ -342,7 +342,7 @@ impl UDPMuxNewAddr { match self.conn_from_stun_message(read.filled(), &addr) { Some(Ok(s)) => Some(s), Some(Err(e)) => { - log::debug!("addr={}: Error when querying existing connections: {}", &addr, e); + tracing::debug!(address=%&addr, "Error when querying existing connections: {}", e); continue; } None => None, @@ -357,20 +357,20 @@ impl UDPMuxNewAddr { if !self.new_addrs.contains(&addr) { match ufrag_from_stun_message(read.filled(), false) { Ok(ufrag) => { - log::trace!( - "Notifying about new address addr={} from ufrag={}", - &addr, - ufrag - ); + tracing::trace!( + address=%&addr, + %ufrag, + "Notifying about new address from ufrag", + ); self.new_addrs.insert(addr); return Poll::Ready(UDPMuxEvent::NewAddr( NewAddr { addr, ufrag }, )); } Err(e) => { - log::debug!( - "Unknown address addr={} (non STUN packet: {})", - &addr, + tracing::debug!( + address=%&addr, + "Unknown address (non STUN packet: {})", e ); } @@ -384,10 +384,10 @@ impl UDPMuxNewAddr { async move { if let Err(err) = conn.write_packet(&packet, addr).await { - log::error!( - "Failed to write packet: {} (addr={})", + tracing::error!( + address=%addr, + "Failed to write packet: {}", err, - addr ); } } @@ -401,10 +401,10 @@ impl UDPMuxNewAddr { Poll::Pending => {} Poll::Ready(Err(err)) if err.kind() == ErrorKind::TimedOut => {} Poll::Ready(Err(err)) if err.kind() == ErrorKind::ConnectionReset => { - log::debug!("ConnectionReset by remote client {err:?}") + tracing::debug!("ConnectionReset by remote client {err:?}") } Poll::Ready(Err(err)) => { - log::error!("Could not read udp packet: {}", err); + tracing::error!("Could not read udp packet: {}", err); return Poll::Ready(UDPMuxEvent::Error(err)); } } @@ -470,7 +470,7 @@ impl UDPMux for UdpMuxHandle { async fn remove_conn_by_ufrag(&self, ufrag: &str) { if let Err(e) = self.remove_sender.send(ufrag.to_owned()).await { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message through channel: {:?}", e); } } } @@ -511,12 +511,12 @@ impl UDPMuxWriter for UdpMuxWriterHandle { { Ok(()) => {} Err(e) => { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message through channel: {:?}", e); return; } } - log::debug!("Registered {} for {}", addr, conn.key()); + tracing::debug!(address=%addr, connection=%conn.key(), "Registered address for connection"); } async fn send_to(&self, buf: &[u8], target: &SocketAddr) -> Result { diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 414fc2721d0..4145a5e7510 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -49,19 +49,16 @@ pub(crate) async fn 
outbound( server_fingerprint: Fingerprint, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new outbound connection to {addr})"); + tracing::debug!(address=%addr, "new outbound connection to address"); let (peer_connection, ufrag) = new_outbound_connection(addr, config, udp_mux).await?; let offer = peer_connection.create_offer(None).await?; - log::debug!("created SDP offer for outbound connection: {:?}", offer.sdp); + tracing::debug!(offer=%offer.sdp, "created SDP offer for outbound connection"); peer_connection.set_local_description(offer).await?; let answer = sdp::answer(addr, server_fingerprint, &ufrag); - log::debug!( - "calculated SDP answer for outbound connection: {:?}", - answer - ); + tracing::debug!(?answer, "calculated SDP answer for outbound connection"); peer_connection.set_remote_description(answer).await?; // This will start the gathering of ICE candidates. let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; @@ -85,16 +82,16 @@ pub(crate) async fn inbound( remote_ufrag: String, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new inbound connection from {addr} (ufrag: {remote_ufrag})"); + tracing::debug!(address=%addr, ufrag=%remote_ufrag, "new inbound connection from address"); let peer_connection = new_inbound_connection(addr, config, udp_mux, &remote_ufrag).await?; let offer = sdp::offer(addr, &remote_ufrag); - log::debug!("calculated SDP offer for inbound connection: {:?}", offer); + tracing::debug!(?offer, "calculated SDP offer for inbound connection"); peer_connection.set_remote_description(offer).await?; let answer = peer_connection.create_answer(None).await?; - log::debug!("created SDP answer for inbound connection: {:?}", answer); + tracing::debug!(?answer, "created SDP answer for inbound connection"); peer_connection.set_local_description(answer).await?; // This will start the gathering of ICE candidates. let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index ce94da0aea8..6e83f75f0d4 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -33,10 +33,13 @@ use std::num::NonZeroU8; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_transport(); let (b_peer_id, mut b_transport) = create_transport(); @@ -53,7 +56,9 @@ async fn smoke() { // Note: This test should likely be ported to the muxer compliance test suite. #[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -102,7 +107,11 @@ fn prop(number_listeners: NonZeroU8, number_streams: NonZeroU8) -> quickcheck::T let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. 
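// Illustrative sketch (not part of this patch): the smoke-test hunk above replaces
// interpolated log messages with field-based tracing events so the counts become
// queryable fields; the listener loop it introduces continues directly below.
// Stand-in locals, mirroring the converted call:
fn announce(number_streams: usize, number_listeners: usize) {
    // Before: log::info!("Creating {number_streams} streams on {number_listeners} connections");
    tracing::info!(
        stream_count = %number_streams,
        connection_count = %number_listeners,
        "Creating streams on connections"
    );
}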
for _ in 0..number_listeners { @@ -244,7 +253,7 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!(stream_count=%number_streams, "Created streams"); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 0e9c5796b97..779cc4d8602 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -15,7 +15,7 @@ bytes = "1.4.0" futures = "0.3.29" js-sys = "0.3.61" libp2p-core = { workspace = true } -log = "0.4.19" +tracing = "0.1.37" parking_lot = "0.12.1" send_wrapper = "0.6.0" thiserror = "1.0.50" diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index 24ca4fdce5d..b4f7566f95e 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -278,7 +278,7 @@ impl Connection { let mut read_buffer = read_buffer.lock().unwrap(); if read_buffer.len() + data.length() as usize > MAX_BUFFER { - log::warn!("Remote is overloading us with messages, closing connection"); + tracing::warn!("Remote is overloading us with messages, closing connection"); errored.store(true, Ordering::SeqCst); return; diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index 77616e1cefd..b4c56539139 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -16,11 +16,11 @@ either = "1.9.0" futures = "0.3.29" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" parking_lot = "0.12.0" pin-project-lite = "0.2.13" rw-stream-sink = { workspace = true } soketto = "0.7.0" +tracing = "0.1.37" url = "2.4" webpki-roots = "0.25" diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 07013973fdc..3593e1eaff2 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -28,7 +28,6 @@ use libp2p_core::{ transport::{ListenerId, TransportError, TransportEvent}, Transport, }; -use log::{debug, trace}; use parking_lot::Mutex; use soketto::{ connection::{self, CloseReason}, @@ -127,13 +126,13 @@ where if self.tls_config.server.is_some() { p } else { - debug!("/wss address but TLS server support is not configured"); + tracing::debug!("/wss address but TLS server support is not configured"); return Err(TransportError::MultiaddrNotSupported(addr)); } } Some(p @ Protocol::Ws(_)) => p, _ => { - debug!("{} is not a websocket multiaddr", addr); + tracing::debug!(address=%addr, "Address is not a websocket multiaddr"); return Err(TransportError::MultiaddrNotSupported(addr)); } }; @@ -187,7 +186,7 @@ where .get(&listener_id) .expect("Protocol was inserted in Transport::listen_on."); listen_addr.push(proto.clone()); - debug!("Listening on {}", listen_addr); + tracing::debug!(address=%listen_addr, "Listening on address"); TransportEvent::NewAddress { listener_id, listen_addr, @@ -288,7 +287,7 @@ where { Ok(Either::Left(redirect)) => { if remaining_redirects == 0 { - debug!("Too many redirects (> {})", max_redirects); + tracing::debug!(%max_redirects, "Too many redirects"); return Err(Error::TooManyRedirects); } remaining_redirects -= 1; @@ -310,7 +309,7 @@ where tls_config: tls::Config, role_override: Endpoint, ) -> Result>, Error> { - trace!("Dialing websocket address: {:?}", addr); + tracing::trace!(address=?addr, "Dialing websocket address"); let dial = match role_override { Endpoint::Dialer => 
transport.lock().dial(addr.tcp_addr), @@ -322,19 +321,19 @@ where })?; let stream = dial.map_err(Error::Transport).await?; - trace!("TCP connection to {} established.", addr.host_port); + tracing::trace!(port=%addr.host_port, "TCP connection established"); let stream = if addr.use_tls { // begin TLS session let dns_name = addr .dns_name .expect("for use_tls we have checked that dns_name is some"); - trace!("Starting TLS handshake with {:?}", dns_name); + tracing::trace!(?dns_name, "Starting TLS handshake"); let stream = tls_config .client .connect(dns_name.clone(), stream) .map_err(|e| { - debug!("TLS handshake with {:?} failed: {}", dns_name, e); + tracing::debug!(?dns_name, "TLS handshake failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -346,7 +345,7 @@ where future::Either::Right(stream) }; - trace!("Sending websocket handshake to {}", addr.host_port); + tracing::trace!(port=%addr.host_port, "Sending websocket handshake"); let mut client = handshake::Client::new(stream, &addr.host_port, addr.path.as_ref()); @@ -359,9 +358,10 @@ where status_code, location, } => { - debug!( - "received redirect ({}); location: {}", - status_code, location + tracing::debug!( + %status_code, + %location, + "received redirect" ); Ok(Either::Left(location)) } @@ -370,7 +370,7 @@ where Err(Error::Handshake(msg.into())) } handshake::ServerResponse::Accepted { .. } => { - trace!("websocket handshake with {} successful", addr.host_port); + tracing::trace!(port=%addr.host_port, "websocket handshake successful"); Ok(Either::Right(Connection::new(client.into_builder()))) } } @@ -388,7 +388,7 @@ where async move { let stream = upgrade.map_err(Error::Transport).await?; - trace!("incoming connection from {}", remote_addr); + tracing::trace!(address=%remote_addr, "incoming connection from address"); let stream = if use_tls { // begin TLS session @@ -396,12 +396,12 @@ where .server .expect("for use_tls we checked server is not none"); - trace!("awaiting TLS handshake with {}", remote_addr); + tracing::trace!(address=%remote_addr, "awaiting TLS handshake with address"); let stream = server .accept(stream) .map_err(move |e| { - debug!("TLS handshake with {} failed: {}", remote_addr, e); + tracing::debug!(address=%remote_addr, "TLS handshake with address failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -414,9 +414,9 @@ where future::Either::Right(stream) }; - trace!( - "receiving websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "receiving websocket handshake request from address" ); let mut server = handshake::Server::new(stream); @@ -429,9 +429,9 @@ where request.key() }; - trace!( - "accepting websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "accepting websocket handshake request from address" ); let response = handshake::server::Response::Accept { @@ -511,7 +511,7 @@ fn parse_ws_dial_addr(addr: Multiaddr) -> Result> { Some(Protocol::Ws(path)) => break (false, path.into_owned()), Some(Protocol::Wss(path)) => { if dns_name.is_none() { - debug!("Missing DNS name in WSS address: {}", addr); + tracing::debug!(addrress=%addr, "Missing DNS name in WSS address"); return Err(Error::InvalidMultiaddr(addr)); } break (true, path.into_owned()); @@ -556,13 +556,13 @@ fn location_to_multiaddr(location: &str) -> Result> { } else if s.eq_ignore_ascii_case("http") | s.eq_ignore_ascii_case("ws") { a.push(Protocol::Ws(url.path().into())) } else { - debug!("unsupported scheme: {}", s); + 
tracing::debug!(scheme=%s, "unsupported scheme"); return Err(Error::InvalidRedirectLocation); } Ok(a) } Err(e) => { - debug!("failed to parse url as multi-address: {:?}", e); + tracing::debug!("failed to parse url as multi-address: {:?}", e); Err(Error::InvalidRedirectLocation) } } diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index cbc340e9244..49053349298 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -19,11 +19,11 @@ js-sys = "0.3.64" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } -log = "0.4.20" multiaddr = { workspace = true } multihash = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } thiserror = "1.0.50" +tracing = "0.1.37" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" web-sys = { version = "0.3.64", features = [ diff --git a/transports/webtransport-websys/src/transport.rs b/transports/webtransport-websys/src/transport.rs index dcb3639a194..cb556ffef99 100644 --- a/transports/webtransport-websys/src/transport.rs +++ b/transports/webtransport-websys/src/transport.rs @@ -65,7 +65,7 @@ impl libp2p_core::Transport for Transport { fn dial(&mut self, addr: Multiaddr) -> Result> { let endpoint = Endpoint::from_multiaddr(&addr).map_err(|e| match e { e @ Error::InvalidMultiaddr(_) => { - log::warn!("{}", e); + tracing::warn!("{}", e); TransportError::MultiaddrNotSupported(addr) } e => TransportError::Other(e),