From 7e861b03c1a374da6bd7a4e063e4c3b3df2d4ce9 Mon Sep 17 00:00:00 2001
From: Julian Tescher
Date: Sun, 10 Apr 2022 14:38:10 -0400
Subject: [PATCH 1/6] Prepare for 1.0.0-beta.1 release

---
 opentelemetry-api/CHANGELOG.md | 2 +-
 opentelemetry-api/Cargo.toml | 2 +-
 opentelemetry-aws/CHANGELOG.md | 7 ++++++
 opentelemetry-aws/Cargo.toml | 4 +--
 opentelemetry-contrib/CHANGELOG.md | 7 ++++++
 opentelemetry-contrib/Cargo.toml | 4 +--
 opentelemetry-datadog/CHANGELOG.md | 9 +++++++
 opentelemetry-datadog/Cargo.toml | 8 +++---
 opentelemetry-dynatrace/CHANGELOG.md | 7 ++++++
 opentelemetry-dynatrace/Cargo.toml | 6 ++---
 opentelemetry-http/CHANGELOG.md | 6 +++++
 opentelemetry-http/Cargo.toml | 4 +--
 opentelemetry-jaeger/CHANGELOG.md | 11 ++++++++
 opentelemetry-jaeger/Cargo.toml | 8 +++---
 opentelemetry-otlp/CHANGELOG.md | 8 ++++++
 opentelemetry-otlp/Cargo.toml | 6 ++---
 opentelemetry-prometheus/CHANGELOG.md | 9 +++++--
 opentelemetry-prometheus/Cargo.toml | 6 ++---
 opentelemetry-proto/CHANGELOG.md | 5 ++++
 opentelemetry-proto/Cargo.toml | 2 +-
 opentelemetry-sdk/CHANGELOG.md | 2 +-
 opentelemetry-sdk/Cargo.toml | 4 +--
 .../CHANGELOG.md | 7 ++++++
 opentelemetry-semantic-conventions/Cargo.toml | 4 +--
 opentelemetry-stackdriver/CHANGELOG.md | 11 ++++++++
 opentelemetry-stackdriver/Cargo.toml | 6 ++---
 opentelemetry-zipkin/CHANGELOG.md | 5 +++-
 opentelemetry-zipkin/Cargo.toml | 8 +++---
 opentelemetry-zpages/CHANGELOG.md | 6 +++++
 opentelemetry-zpages/Cargo.toml | 6 ++---
 opentelemetry/CHANGELOG.md | 25 +++++++++++++++++++
 opentelemetry/Cargo.toml | 6 ++---
 32 files changed, 164 insertions(+), 47 deletions(-)
 create mode 100644 opentelemetry-proto/CHANGELOG.md

diff --git a/opentelemetry-api/CHANGELOG.md b/opentelemetry-api/CHANGELOG.md
index c3712a44fc..87b81aabde 100644
--- a/opentelemetry-api/CHANGELOG.md
+++ b/opentelemetry-api/CHANGELOG.md
@@ -1,5 +1,5 @@
 # Changelog
 
-## v0.1.0
+## v1.0.0-beta.1
 
 - API split from `opentelemetry` crate
diff --git a/opentelemetry-api/Cargo.toml b/opentelemetry-api/Cargo.toml
index 257ad09a47..aaecaa6777 100644
--- a/opentelemetry-api/Cargo.toml
+++ b/opentelemetry-api/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-api"
-version = "0.1.0"
+version = "1.0.0-beta.1"
 edition = "2018"
 
 [dependencies]
diff --git a/opentelemetry-aws/CHANGELOG.md b/opentelemetry-aws/CHANGELOG.md
index 2bdbff448a..5957d03a29 100644
--- a/opentelemetry-aws/CHANGELOG.md
+++ b/opentelemetry-aws/CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## v0.6.0
+
+### Changed
+
+- reduce `tokio` feature requirements #750
+- Update to opentelemetry v1.0.0-beta.1
+
 ## v0.5.0
 
 ### Changed
diff --git a/opentelemetry-aws/Cargo.toml b/opentelemetry-aws/Cargo.toml
index 538125d973..a9375b1bc6 100644
--- a/opentelemetry-aws/Cargo.toml
+++ b/opentelemetry-aws/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-aws"
-version = "0.5.0"
+version = "0.6.0"
 description = "AWS exporters and propagators for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-aws"
 repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-aws"
@@ -22,7 +22,7 @@ default = ["trace"]
 trace = ["opentelemetry/trace"]
 
 [dependencies]
-opentelemetry = { version = "0.17", path = "../opentelemetry", features = ["trace"] }
+opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
 lazy_static = "1.4"
 
 [dev-dependencies]
diff --git a/opentelemetry-contrib/CHANGELOG.md b/opentelemetry-contrib/CHANGELOG.md
index 4efbf9ab41..139a12e9f2 100644
--- a/opentelemetry-contrib/CHANGELOG.md
+++ b/opentelemetry-contrib/CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## v0.10.0
+
+### Changed
+
+- Rename binary propagator's functions #776
+- Update to opentelemetry v1.0.0-beta.1
+
 ## v0.9.0
 
 ### Changed
diff --git a/opentelemetry-contrib/Cargo.toml b/opentelemetry-contrib/Cargo.toml
index 819ce53979..6e4321a26a 100644
--- a/opentelemetry-contrib/Cargo.toml
+++ b/opentelemetry-contrib/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-contrib"
-version = "0.9.0"
+version = "0.10.0"
 description = "Rust contrib repo for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-contrib"
 repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-contrib"
@@ -23,7 +23,7 @@ base64_format = ["base64", "binary_propagator"]
 binary_propagator = []
 
 [dependencies]
-opentelemetry = { version = "0.17", path = "../opentelemetry", features = ["trace"] }
+opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
 base64 = { version = "0.13", optional = true }
 lazy_static = "1.4"
diff --git a/opentelemetry-datadog/CHANGELOG.md b/opentelemetry-datadog/CHANGELOG.md
index 1c643f8aec..8146e07278 100644
--- a/opentelemetry-datadog/CHANGELOG.md
+++ b/opentelemetry-datadog/CHANGELOG.md
@@ -1,5 +1,14 @@
 # Changelog
 
+## v0.6.0
+
+### Changed
+
+- Allow custom mapping #770
+- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry-http v0.7.0
+- Update to opentelemetry-semantic-conventions v0.10.0
+
 ## v0.5.0
 
 ### Changed
diff --git a/opentelemetry-datadog/Cargo.toml b/opentelemetry-datadog/Cargo.toml
index 00f02a15ae..d8c206a3c4 100644
--- a/opentelemetry-datadog/Cargo.toml
+++ b/opentelemetry-datadog/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-datadog"
-version = "0.5.0"
+version = "0.6.0"
 description = "Datadog exporters and propagators for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-datadog"
 repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-datadog"
@@ -25,9 +25,9 @@ surf-client = ["surf", "opentelemetry-http/surf"]
 [dependencies]
 async-trait = "0.1"
 indexmap = "1.6"
-opentelemetry = { version = "0.17", path = "../opentelemetry", features = ["trace"] }
-opentelemetry-http = { version = "0.6", path = "../opentelemetry-http" }
-opentelemetry-semantic-conventions = { version = "0.9", path = "../opentelemetry-semantic-conventions" }
+opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
+opentelemetry-http = { version = "0.7", path = "../opentelemetry-http" }
+opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" }
 rmp = "0.8"
 reqwest = { version = "0.11", default-features = false, optional = true }
 surf = { version = "2.0", default-features = false, optional = true }
diff --git a/opentelemetry-dynatrace/CHANGELOG.md b/opentelemetry-dynatrace/CHANGELOG.md
index 71cb4cea39..0275f035e8 100644
--- a/opentelemetry-dynatrace/CHANGELOG.md
+++ b/opentelemetry-dynatrace/CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## v0.2.0
+
+### Changed
+
+- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry-http v0.7.0
+
 ## v0.1.0
 
 ### Added
diff --git a/opentelemetry-dynatrace/Cargo.toml b/opentelemetry-dynatrace/Cargo.toml
index 4238980657..f9da740137 100644
--- a/opentelemetry-dynatrace/Cargo.toml
+++ b/opentelemetry-dynatrace/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-dynatrace"
-version = "0.1.0"
+version = "0.2.0"
 description = "Dynatrace exporters and propagators for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-dynatrace"
 repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-dynatrace"
@@ -57,8 +57,8 @@ getrandom = { version = "0.2", optional = true }
 http = "0.2"
 isahc = { version = "1.4", default-features = false, optional = true }
 js-sys = { version = "0.3.5", optional = true }
-opentelemetry = { version = "0.17", path = "../opentelemetry", default-features = false }
-opentelemetry-http = { version = "0.6", path = "../opentelemetry-http", default-features = false }
+opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", default-features = false }
+opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", default-features = false }
 pin-project = { version = "1.0", optional = true }
 reqwest = { version = "0.11", default-features = false, optional = true }
 surf = { version = "2.0", default-features = false, optional = true }
diff --git a/opentelemetry-http/CHANGELOG.md b/opentelemetry-http/CHANGELOG.md
index e79ed31d19..482460fe58 100644
--- a/opentelemetry-http/CHANGELOG.md
+++ b/opentelemetry-http/CHANGELOG.md
@@ -1,5 +1,11 @@
 # Changelog
 
+## v0.7.0
+
+### Changed
+
+- Update to opentelemetry v1.0.0-beta.1
+
 ## v0.6.0
 
 ### Changed
diff --git a/opentelemetry-http/Cargo.toml b/opentelemetry-http/Cargo.toml
index 7ac90674c8..858b58434f 100644
--- a/opentelemetry-http/Cargo.toml
+++ b/opentelemetry-http/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-http"
-version = "0.6.0"
+version = "0.7.0"
 description = "Helper implementations for exchange of traces and metrics over HTTP"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust"
 repository = "https://github.com/open-telemetry/opentelemetry-rust"
@@ -13,6 +13,6 @@ async-trait = "0.1"
 bytes = "1"
 http = "0.2"
 isahc = { version = "1.4", default-features = false, optional = true }
-opentelemetry = { version = "0.17", path = "../opentelemetry", features = ["trace"] }
+opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
 reqwest = { version = "0.11", default-features = false, features = ["blocking"], optional = true }
 surf = { version = "2.0", default-features = false, optional = true }
diff --git a/opentelemetry-jaeger/CHANGELOG.md b/opentelemetry-jaeger/CHANGELOG.md
index fe1ccfd361..ec7a8ac4a7 100644
--- a/opentelemetry-jaeger/CHANGELOG.md
+++ b/opentelemetry-jaeger/CHANGELOG.md
@@ -1,5 +1,16 @@
 # Changelog
 
+## v0.17.0
+
+### Changed
+
+- Consolidate the config errors #762
+- Better configuration pipeline #748
+- Add Timeout Environment Var #729
+- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry-http v0.7.0
+- Update to opentelemetry-semantic-conventions v0.10.0
+
 ## v0.16.0
 
 ### Changed
diff --git a/opentelemetry-jaeger/Cargo.toml b/opentelemetry-jaeger/Cargo.toml
index f020cb87ca..cd240fe07b 100644
--- a/opentelemetry-jaeger/Cargo.toml
+++ b/opentelemetry-jaeger/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-jaeger"
-version = "0.16.0"
+version = "0.17.0"
 description = "Jaeger exporter for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger"
 repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger"
@@ -28,9 +28,9 @@ http = { version = "0.2", optional = true }
 isahc = { version = "1.4", default-features = false, optional = true }
 js-sys = { version = "0.3", optional = true }
 lazy_static = "1.4"
-opentelemetry = { version = "0.17", default-features = false, features = ["trace"], path = "../opentelemetry" }
-opentelemetry-http = { version = "0.6", path = "../opentelemetry-http", optional = true }
-opentelemetry-semantic-conventions = { version = "0.9", path = "../opentelemetry-semantic-conventions" }
+opentelemetry = { version = "1.0.0-beta.1", default-features = false, features = ["trace"], path = "../opentelemetry" }
+opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true }
+opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" }
 pin-project = { version = "1.0", optional = true }
 reqwest = { version = "0.11", default-features = false, optional = true }
 surf = { version = "2.0", default-features = false, optional = true }
diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md
index fb7778be85..26ad244bdb 100644
--- a/opentelemetry-otlp/CHANGELOG.md
+++ b/opentelemetry-otlp/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## v0.11.0
+
+### Changed
+
+- reduce `tokio` feature requirements #750
+- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry-http v0.7.0
+
 ## v0.10.0
 
 ### Changed
diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml
index 074a057ca5..a8143dac53 100644
--- a/opentelemetry-otlp/Cargo.toml
+++ b/opentelemetry-otlp/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-otlp"
-version = "0.10.0"
+version = "0.11.0"
 description = "Exporter for the OpenTelemetry Collector"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp"
 repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp"
@@ -35,8 +35,8 @@ futures-util = { version = "0.3", default-features = false, features = ["std"] }
 
 opentelemetry-proto = { version = "0.1", path = "../opentelemetry-proto", default-features = false }
 grpcio = { version = "0.9", optional = true }
-opentelemetry = { version = "0.17", default-features = false, features = ["trace"], path = "../opentelemetry" }
-opentelemetry-http = { version = "0.6", path = "../opentelemetry-http", optional = true }
+opentelemetry = { version = "1.0.0-beta.1", default-features = false, features = ["trace"], path = "../opentelemetry" }
+opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true }
 protobuf = { version = "2.18", optional = true }
 prost = { version = "0.9", optional = true }
diff --git a/opentelemetry-prometheus/CHANGELOG.md b/opentelemetry-prometheus/CHANGELOG.md
index 199cbe800c..cde2fd238a 100644
--- a/opentelemetry-prometheus/CHANGELOG.md
+++ b/opentelemetry-prometheus/CHANGELOG.md
@@ -1,9 +1,14 @@
 # Changelog
 
-## Unreleased
+## v0.11.0
+
+### Changed
+
+- Update to opentelemetry v1.0.0-beta.1
 
 ### Removed
-- BREAKING: `PrometheusExporter::new()` removed. Use `ExporterBuilder`.
+
+- BREAKING: `PrometheusExporter::new()` removed. Use `ExporterBuilder`. #727
 
 ## v0.10.0
diff --git a/opentelemetry-prometheus/Cargo.toml b/opentelemetry-prometheus/Cargo.toml
index 2c0201bed1..75b2fe678f 100644
--- a/opentelemetry-prometheus/Cargo.toml
+++ b/opentelemetry-prometheus/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-prometheus"
-version = "0.10.0"
+version = "0.11.0"
 description = "Prometheus exporter for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust"
 repository = "https://github.com/open-telemetry/opentelemetry-rust"
@@ -19,12 +19,12 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-opentelemetry = { version = "0.17", path = "../opentelemetry", default-features = false, features = ["metrics"] }
+opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", default-features = false, features = ["metrics"] }
 prometheus = "0.13"
 protobuf = "2.14"
 
 [dev-dependencies]
-opentelemetry = { version = "0.17", path = "../opentelemetry", default-features = false, features = ["metrics", "testing"] }
+opentelemetry = { path = "../opentelemetry", features = ["metrics", "testing"] }
 lazy_static = "1.4"
 
 [features]
diff --git a/opentelemetry-proto/CHANGELOG.md b/opentelemetry-proto/CHANGELOG.md
new file mode 100644
index 0000000000..1b7296724d
--- /dev/null
+++ b/opentelemetry-proto/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Changelog
+
+## v0.1.0
+
+Initial crate release.
diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml
index 5a4f2fcaef..c97572c53d 100644
--- a/opentelemetry-proto/Cargo.toml
+++ b/opentelemetry-proto/Cargo.toml
@@ -50,7 +50,7 @@ grpcio = { version = "0.9", optional = true }
 tonic = { version = "0.6.2", optional = true }
 prost = { version = "0.9", optional = true }
 protobuf = { version = "2.18", optional = true } # todo: update to 3.0 so we have docs for generated types.
-opentelemetry = { version = "0.17", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry" } +opentelemetry = { version = "1.0.0-beta.1", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry" } futures = { version = "0.3", default-features = false, features = ["std"] } futures-util = { version = "0.3", default-features = false, features = ["std"] } serde = { version = "1.0", optional = true } diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index aa67505013..c9a61f7d28 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -1,5 +1,5 @@ # Changelog -## v0.1.0 +## v1.0.0-beta.1 - SDK split from `opentelemetry` crate diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 37595ae041..53f07cf423 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "opentelemetry-sdk" -version = "0.1.0" +version = "1.0.0-beta.1" edition = "2018" [dependencies] -opentelemetry-api = { version = "0.1", path = "../opentelemetry-api/" } +opentelemetry-api = { version = "1.0.0-beta.1", path = "../opentelemetry-api/" } async-std = { version = "1.6", features = ["unstable"], optional = true } async-trait = { version = "0.1", optional = true } dashmap = { version = "4.0.1", optional = true } diff --git a/opentelemetry-semantic-conventions/CHANGELOG.md b/opentelemetry-semantic-conventions/CHANGELOG.md index 712690bbaf..0363ee3dec 100644 --- a/opentelemetry-semantic-conventions/CHANGELOG.md +++ b/opentelemetry-semantic-conventions/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## v0.10.0 + +### Changed + +- update to v1.9 spec #754 +- Update to opentelemetry v1.0.0-beta.1 + ## v0.9.0 ### Changed diff --git a/opentelemetry-semantic-conventions/Cargo.toml b/opentelemetry-semantic-conventions/Cargo.toml index 25c4cf8088..601878c886 100644 --- a/opentelemetry-semantic-conventions/Cargo.toml +++ b/opentelemetry-semantic-conventions/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-semantic-conventions" -version = "0.9.0" +version = "0.10.0" description = "Semantic conventions for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-semantic-conventions" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-semantic-conventions" @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry = { version = "0.17", default-features = false, path = "../opentelemetry" } +opentelemetry = { version = "1.0.0-beta.1", default-features = false, path = "../opentelemetry" } [dev-dependencies] opentelemetry = { default-features = false, features = ["trace"], path = "../opentelemetry" } diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md index cfdf132eff..0b78a31918 100644 --- a/opentelemetry-stackdriver/CHANGELOG.md +++ b/opentelemetry-stackdriver/CHANGELOG.md @@ -1,5 +1,16 @@ # Changelog +## v0.15.0 + +### Added + +- Added mappings from OTel attributes to Google Cloud Traces #744 + +### Changed + +- Upgrade to opentelemetry v1.0.0-beta.1 +- Upgrade to opentelemetry-semantic-conventions v0.10 + ## v0.14.0 ### Changed diff --git a/opentelemetry-stackdriver/Cargo.toml b/opentelemetry-stackdriver/Cargo.toml index d1bff86618..65cdde5bc2 100644 --- a/opentelemetry-stackdriver/Cargo.toml +++ b/opentelemetry-stackdriver/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"opentelemetry-stackdriver" -version = "0.14.0" +version = "0.15.0" description = "A Rust opentelemetry exporter that uploads traces to Google Stackdriver trace." documentation = "https://docs.rs/opentelemetry-stackdriver/" repository = "https://github.com/open-telemetry/opentelemetry-rust" @@ -16,8 +16,8 @@ hex = "0.4" http = "0.2" hyper = "0.14.2" hyper-rustls = { version = "0.22.1", optional = true } -opentelemetry = { version = "0.17", path = "../opentelemetry" } -opentelemetry-semantic-conventions = { version = "0.9", path = "../opentelemetry-semantic-conventions" } +opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry" } +opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } prost = "0.9" prost-types = "0.9" thiserror = "1.0.30" diff --git a/opentelemetry-zipkin/CHANGELOG.md b/opentelemetry-zipkin/CHANGELOG.md index 560327f4b5..2e1e12261f 100644 --- a/opentelemetry-zipkin/CHANGELOG.md +++ b/opentelemetry-zipkin/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## unreleased +## v0.16.0 ## Added @@ -9,6 +9,9 @@ ## Changed - Add defaults for timeouts to HTTP clients #718 +- Update to opentelemetry v1.0.0-beta.1 +- Update to opentelemetry-http v0.7.0 +- Update to opentelemetry-semantic-conventions v0.10.0 ## v0.15.0 diff --git a/opentelemetry-zipkin/Cargo.toml b/opentelemetry-zipkin/Cargo.toml index b6f35241bc..db15b73c99 100644 --- a/opentelemetry-zipkin/Cargo.toml +++ b/opentelemetry-zipkin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-zipkin" -version = "0.15.0" +version = "0.16.0" description = "Zipkin exporter for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-zipkin" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-zipkin" @@ -27,9 +27,9 @@ surf-client = ["surf", "opentelemetry-http/surf"] [dependencies] async-trait = "0.1" -opentelemetry = { version = "0.17", path = "../opentelemetry", features = ["trace"] } -opentelemetry-http = { version = "0.6", path = "../opentelemetry-http", optional = true } -opentelemetry-semantic-conventions = { version = "0.9", path = "../opentelemetry-semantic-conventions" } +opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] } +opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true } +opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } serde_json = "1.0" serde = { version = "1.0", features = ["derive"] } typed-builder = "0.9" diff --git a/opentelemetry-zpages/CHANGELOG.md b/opentelemetry-zpages/CHANGELOG.md index 85c869b628..851065bc04 100644 --- a/opentelemetry-zpages/CHANGELOG.md +++ b/opentelemetry-zpages/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## v0.3.0 + +### Changed + +- Update to opentelemetry v1.0.0-beta.1 + ## v0.2.0 ### Changed diff --git a/opentelemetry-zpages/Cargo.toml b/opentelemetry-zpages/Cargo.toml index b8b937f1c6..be2c4bdcec 100644 --- a/opentelemetry-zpages/Cargo.toml +++ b/opentelemetry-zpages/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-zpages" -version = "0.2.0" +version = "0.3.0" description = "ZPages implementation for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/master/opentelemetry-zpages" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/master/opentelemetry-zpages" @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] 
-opentelemetry = { version = "0.17", path = "../opentelemetry", default-features = false, features = ["trace"] } +opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", default-features = false, features = ["trace"] } opentelemetry-proto = { version = "0.1", path = "../opentelemetry-proto", features = ["with-serde", "zpages", "gen-protoc"], default-features = false } async-channel = "1.6" futures-channel = "0.3" @@ -30,4 +30,4 @@ serde_json = "1.0" [dev-dependencies] tokio = { version = "1.0", features = ["macros", "rt"] } -opentelemetry = { version = "0.17", path = "../opentelemetry", default-features = false, features = ["trace", "testing"] } +opentelemetry = { path = "../opentelemetry", features = ["trace", "testing"] } diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 6a53e1eece..93e2c1f7d9 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## [v1.0.0-beta.1](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.17.0...v1.0.0-beta.1) + +### Added + +- Pull sampling probability from `OTEL_TRACES_SAMPLER_ARG` in default sdk config #737 +- Add `schema_url` to `Tracer` #743 + +### Changed + +- Deprecate metrics `ValueRecorder` in favor of `Histogram` #728 +- Move `IdGenerator` to SDK, rename to `RandomIdGenerator` #742 +- `meter_with_version` accepts optional parameter for `version` and `schema_url` #752 +- Unify `Event` and `Link` access patterns #757 +- move `SpanKind` display format impl to jaeger crate #758 +- make `TraceStateError` priviate #755 +- rename `Span::record_exception` to `Span::record_error` #756 +- Replace `StatusCode` and `message` with `Status` #760 +- Move `TracerProvider::force_flush` to SDK #658 + +### Removed + +- Remove `serialize` feature #738 +- Remove `StatusCode::as_str` #741 +- Remove `Tracer::with_span` #746 + ## [v0.17.0](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.16.0...v0.17.0) ### Changed diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml index 2351658079..d16a5d519b 100644 --- a/opentelemetry/Cargo.toml +++ b/opentelemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry" -version = "0.17.0" +version = "1.0.0-beta.1" description = "A metrics collection and distributed tracing framework" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" @@ -20,8 +20,8 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry-api = { version = "0.1", path = "../opentelemetry-api" } -opentelemetry-sdk = { version = "0.1", path = "../opentelemetry-sdk" } +opentelemetry-api = { version = "1.0.0-beta.1", path = "../opentelemetry-api" } +opentelemetry-sdk = { version = "1.0.0-beta.1", path = "../opentelemetry-sdk" } [features] default = ["trace"] From 1be15ece2d418b63e0b9c7fc0ad4b98e92cb432f Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Sun, 17 Apr 2022 12:57:37 -0400 Subject: [PATCH 2/6] Switch to v0.18.0 versioning --- opentelemetry-api/CHANGELOG.md | 2 +- opentelemetry-api/Cargo.toml | 2 +- opentelemetry-aws/CHANGELOG.md | 2 +- opentelemetry-aws/Cargo.toml | 2 +- opentelemetry-contrib/CHANGELOG.md | 2 +- opentelemetry-contrib/Cargo.toml | 2 +- opentelemetry-datadog/CHANGELOG.md | 2 +- opentelemetry-datadog/Cargo.toml | 2 +- opentelemetry-dynatrace/CHANGELOG.md | 2 +- opentelemetry-dynatrace/Cargo.toml | 2 +- opentelemetry-http/CHANGELOG.md | 2 +- opentelemetry-http/Cargo.toml | 2 +- 
 opentelemetry-jaeger/CHANGELOG.md | 2 +-
 opentelemetry-jaeger/Cargo.toml | 2 +-
 opentelemetry-otlp/CHANGELOG.md | 2 +-
 opentelemetry-otlp/Cargo.toml | 2 +-
 opentelemetry-prometheus/CHANGELOG.md | 2 +-
 opentelemetry-prometheus/Cargo.toml | 2 +-
 opentelemetry-proto/Cargo.toml | 2 +-
 opentelemetry-sdk/CHANGELOG.md | 2 +-
 opentelemetry-sdk/Cargo.toml | 4 ++--
 opentelemetry-semantic-conventions/CHANGELOG.md | 2 +-
 opentelemetry-semantic-conventions/Cargo.toml | 2 +-
 opentelemetry-stackdriver/CHANGELOG.md | 2 +-
 opentelemetry-stackdriver/Cargo.toml | 2 +-
 opentelemetry-zipkin/CHANGELOG.md | 2 +-
 opentelemetry-zipkin/Cargo.toml | 2 +-
 opentelemetry-zpages/CHANGELOG.md | 2 +-
 opentelemetry-zpages/Cargo.toml | 2 +-
 opentelemetry/CHANGELOG.md | 8 ++++++--
 opentelemetry/Cargo.toml | 6 +++---
 31 files changed, 39 insertions(+), 35 deletions(-)

diff --git a/opentelemetry-api/CHANGELOG.md b/opentelemetry-api/CHANGELOG.md
index 87b81aabde..46f8e4517b 100644
--- a/opentelemetry-api/CHANGELOG.md
+++ b/opentelemetry-api/CHANGELOG.md
@@ -1,5 +1,5 @@
 # Changelog
 
-## v1.0.0-beta.1
+## v0.18.0
 
 - API split from `opentelemetry` crate
diff --git a/opentelemetry-api/Cargo.toml b/opentelemetry-api/Cargo.toml
index aaecaa6777..93fd875294 100644
--- a/opentelemetry-api/Cargo.toml
+++ b/opentelemetry-api/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-api"
-version = "1.0.0-beta.1"
+version = "0.18.0"
 edition = "2018"
 
 [dependencies]
diff --git a/opentelemetry-aws/CHANGELOG.md b/opentelemetry-aws/CHANGELOG.md
index 5957d03a29..a2b361c10f 100644
--- a/opentelemetry-aws/CHANGELOG.md
+++ b/opentelemetry-aws/CHANGELOG.md
@@ -5,7 +5,7 @@
 ### Changed
 
 - reduce `tokio` feature requirements #750
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 
 ## v0.5.0
diff --git a/opentelemetry-aws/Cargo.toml b/opentelemetry-aws/Cargo.toml
index a9375b1bc6..fca3bd362a 100644
--- a/opentelemetry-aws/Cargo.toml
+++ b/opentelemetry-aws/Cargo.toml
@@ -22,7 +22,7 @@ default = ["trace"]
 trace = ["opentelemetry/trace"]
 
 [dependencies]
-opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
+opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] }
 lazy_static = "1.4"
 
 [dev-dependencies]
diff --git a/opentelemetry-contrib/CHANGELOG.md b/opentelemetry-contrib/CHANGELOG.md
index 139a12e9f2..9f760071fe 100644
--- a/opentelemetry-contrib/CHANGELOG.md
+++ b/opentelemetry-contrib/CHANGELOG.md
@@ -5,7 +5,7 @@
 ### Changed
 
 - Rename binary propagator's functions #776
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 
 ## v0.9.0
diff --git a/opentelemetry-contrib/Cargo.toml b/opentelemetry-contrib/Cargo.toml
index 6e4321a26a..b0ea31abf0 100644
--- a/opentelemetry-contrib/Cargo.toml
+++ b/opentelemetry-contrib/Cargo.toml
@@ -23,7 +23,7 @@ base64_format = ["base64", "binary_propagator"]
 binary_propagator = []
 
 [dependencies]
-opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
+opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] }
 base64 = { version = "0.13", optional = true }
 lazy_static = "1.4"
diff --git a/opentelemetry-datadog/CHANGELOG.md b/opentelemetry-datadog/CHANGELOG.md
index 8146e07278..2048cb155f 100644
--- a/opentelemetry-datadog/CHANGELOG.md
+++ b/opentelemetry-datadog/CHANGELOG.md
@@ -5,7 +5,7 @@
 ### Changed
 
 - Allow custom mapping #770
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 - Update to opentelemetry-http v0.7.0
 - Update to opentelemetry-semantic-conventions v0.10.0
diff --git a/opentelemetry-datadog/Cargo.toml b/opentelemetry-datadog/Cargo.toml
index d8c206a3c4..2d65cfad62 100644
--- a/opentelemetry-datadog/Cargo.toml
+++ b/opentelemetry-datadog/Cargo.toml
@@ -25,7 +25,7 @@ surf-client = ["surf", "opentelemetry-http/surf"]
 [dependencies]
 async-trait = "0.1"
 indexmap = "1.6"
-opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
+opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] }
 opentelemetry-http = { version = "0.7", path = "../opentelemetry-http" }
 opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" }
 rmp = "0.8"
diff --git a/opentelemetry-dynatrace/CHANGELOG.md b/opentelemetry-dynatrace/CHANGELOG.md
index 0275f035e8..150e3cadbc 100644
--- a/opentelemetry-dynatrace/CHANGELOG.md
+++ b/opentelemetry-dynatrace/CHANGELOG.md
@@ -4,7 +4,7 @@
 
 ### Changed
 
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 - Update to opentelemetry-http v0.7.0
 
 ## v0.1.0
diff --git a/opentelemetry-dynatrace/Cargo.toml b/opentelemetry-dynatrace/Cargo.toml
index f9da740137..dabcf64e30 100644
--- a/opentelemetry-dynatrace/Cargo.toml
+++ b/opentelemetry-dynatrace/Cargo.toml
@@ -57,7 +57,7 @@ getrandom = { version = "0.2", optional = true }
 http = "0.2"
 isahc = { version = "1.4", default-features = false, optional = true }
 js-sys = { version = "0.3.5", optional = true }
-opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", default-features = false }
+opentelemetry = { version = "0.18.0", path = "../opentelemetry", default-features = false }
 opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", default-features = false }
 pin-project = { version = "1.0", optional = true }
 reqwest = { version = "0.11", default-features = false, optional = true }
diff --git a/opentelemetry-http/CHANGELOG.md b/opentelemetry-http/CHANGELOG.md
index 482460fe58..a44baa1b17 100644
--- a/opentelemetry-http/CHANGELOG.md
+++ b/opentelemetry-http/CHANGELOG.md
@@ -4,7 +4,7 @@
 
 ### Changed
 
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 
 ## v0.6.0
diff --git a/opentelemetry-http/Cargo.toml b/opentelemetry-http/Cargo.toml
index 858b58434f..f55b3ef9e0 100644
--- a/opentelemetry-http/Cargo.toml
+++ b/opentelemetry-http/Cargo.toml
@@ -13,6 +13,6 @@ async-trait = "0.1"
 bytes = "1"
 http = "0.2"
 isahc = { version = "1.4", default-features = false, optional = true }
-opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] }
+opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] }
 reqwest = { version = "0.11", default-features = false, features = ["blocking"], optional = true }
 surf = { version = "2.0", default-features = false, optional = true }
diff --git a/opentelemetry-jaeger/CHANGELOG.md b/opentelemetry-jaeger/CHANGELOG.md
index ec7a8ac4a7..93c4fac65d 100644
--- a/opentelemetry-jaeger/CHANGELOG.md
+++ b/opentelemetry-jaeger/CHANGELOG.md
@@ -7,7 +7,7 @@
 - Consolidate the config errors #762
 - Better configuration pipeline #748
 - Add Timeout Environment Var #729
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 - Update to opentelemetry-http v0.7.0
 - Update to opentelemetry-semantic-conventions v0.10.0
diff --git a/opentelemetry-jaeger/Cargo.toml b/opentelemetry-jaeger/Cargo.toml
index cd240fe07b..8124d5a0df 100644
--- a/opentelemetry-jaeger/Cargo.toml
+++ b/opentelemetry-jaeger/Cargo.toml
@@ -28,7 +28,7 @@ http = { version = "0.2", optional = true }
 isahc = { version = "1.4", default-features = false, optional = true }
 js-sys = { version = "0.3", optional = true }
 lazy_static = "1.4"
-opentelemetry = { version = "1.0.0-beta.1", default-features = false, features = ["trace"], path = "../opentelemetry" }
+opentelemetry = { version = "0.18.0", default-features = false, features = ["trace"], path = "../opentelemetry" }
 opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true }
 opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" }
 pin-project = { version = "1.0", optional = true }
diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md
index 26ad244bdb..3deb0044b7 100644
--- a/opentelemetry-otlp/CHANGELOG.md
+++ b/opentelemetry-otlp/CHANGELOG.md
@@ -5,7 +5,7 @@
 ### Changed
 
 - reduce `tokio` feature requirements #750
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 - Update to opentelemetry-http v0.7.0
 
 ## v0.10.0
diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml
index a8143dac53..88ea1952ff 100644
--- a/opentelemetry-otlp/Cargo.toml
+++ b/opentelemetry-otlp/Cargo.toml
@@ -35,7 +35,7 @@ futures-util = { version = "0.3", default-features = false, features = ["std"] }
 
 opentelemetry-proto = { version = "0.1", path = "../opentelemetry-proto", default-features = false }
 grpcio = { version = "0.9", optional = true }
-opentelemetry = { version = "1.0.0-beta.1", default-features = false, features = ["trace"], path = "../opentelemetry" }
+opentelemetry = { version = "0.18.0", default-features = false, features = ["trace"], path = "../opentelemetry" }
 opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true }
 protobuf = { version = "2.18", optional = true }
diff --git a/opentelemetry-prometheus/CHANGELOG.md b/opentelemetry-prometheus/CHANGELOG.md
index cde2fd238a..2aed1a66a2 100644
--- a/opentelemetry-prometheus/CHANGELOG.md
+++ b/opentelemetry-prometheus/CHANGELOG.md
@@ -4,7 +4,7 @@
 
 ### Changed
 
-- Update to opentelemetry v1.0.0-beta.1
+- Update to opentelemetry v0.18.0
 
 ### Removed
diff --git a/opentelemetry-prometheus/Cargo.toml b/opentelemetry-prometheus/Cargo.toml
index 75b2fe678f..5fd02a1de4 100644
--- a/opentelemetry-prometheus/Cargo.toml
+++ b/opentelemetry-prometheus/Cargo.toml
@@ -19,7 +19,7 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", default-features = false, features = ["metrics"] }
+opentelemetry = { version = "0.18.0", path = "../opentelemetry", default-features = false, features = ["metrics"] }
 prometheus = "0.13"
 protobuf = "2.14"
diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml
index c97572c53d..3fbdc31a69 100644
--- a/opentelemetry-proto/Cargo.toml
+++ b/opentelemetry-proto/Cargo.toml
@@ -50,7 +50,7 @@ grpcio = { version = "0.9", optional = true }
 tonic = { version = "0.6.2", optional = true }
 prost = { version = "0.9", optional = true }
 protobuf = { version = "2.18", optional = true } # todo: update to 3.0 so we have docs for generated types.
-opentelemetry = { version = "1.0.0-beta.1", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry" } +opentelemetry = { version = "0.18.0", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry" } futures = { version = "0.3", default-features = false, features = ["std"] } futures-util = { version = "0.3", default-features = false, features = ["std"] } serde = { version = "1.0", optional = true } diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index c9a61f7d28..ec483b74ed 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -1,5 +1,5 @@ # Changelog -## v1.0.0-beta.1 +## v0.18.0 - SDK split from `opentelemetry` crate diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 53f07cf423..20862cc9a7 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "opentelemetry-sdk" -version = "1.0.0-beta.1" +version = "0.18.0" edition = "2018" [dependencies] -opentelemetry-api = { version = "1.0.0-beta.1", path = "../opentelemetry-api/" } +opentelemetry-api = { version = "0.18.0", path = "../opentelemetry-api/" } async-std = { version = "1.6", features = ["unstable"], optional = true } async-trait = { version = "0.1", optional = true } dashmap = { version = "4.0.1", optional = true } diff --git a/opentelemetry-semantic-conventions/CHANGELOG.md b/opentelemetry-semantic-conventions/CHANGELOG.md index 0363ee3dec..cc0a9a86ef 100644 --- a/opentelemetry-semantic-conventions/CHANGELOG.md +++ b/opentelemetry-semantic-conventions/CHANGELOG.md @@ -5,7 +5,7 @@ ### Changed - update to v1.9 spec #754 -- Update to opentelemetry v1.0.0-beta.1 +- Update to opentelemetry v0.18.0 ## v0.9.0 diff --git a/opentelemetry-semantic-conventions/Cargo.toml b/opentelemetry-semantic-conventions/Cargo.toml index 601878c886..141a0526ef 100644 --- a/opentelemetry-semantic-conventions/Cargo.toml +++ b/opentelemetry-semantic-conventions/Cargo.toml @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry = { version = "1.0.0-beta.1", default-features = false, path = "../opentelemetry" } +opentelemetry = { version = "0.18.0", default-features = false, path = "../opentelemetry" } [dev-dependencies] opentelemetry = { default-features = false, features = ["trace"], path = "../opentelemetry" } diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md index 0b78a31918..5a3024a592 100644 --- a/opentelemetry-stackdriver/CHANGELOG.md +++ b/opentelemetry-stackdriver/CHANGELOG.md @@ -8,7 +8,7 @@ ### Changed -- Upgrade to opentelemetry v1.0.0-beta.1 +- Upgrade to opentelemetry v0.18.0 - Upgrade to opentelemetry-semantic-conventions v0.10 ## v0.14.0 diff --git a/opentelemetry-stackdriver/Cargo.toml b/opentelemetry-stackdriver/Cargo.toml index 65cdde5bc2..c73d76b08d 100644 --- a/opentelemetry-stackdriver/Cargo.toml +++ b/opentelemetry-stackdriver/Cargo.toml @@ -16,7 +16,7 @@ hex = "0.4" http = "0.2" hyper = "0.14.2" hyper-rustls = { version = "0.22.1", optional = true } -opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry" } +opentelemetry = { version = "0.18.0", path = "../opentelemetry" } opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } prost = "0.9" prost-types = "0.9" diff --git a/opentelemetry-zipkin/CHANGELOG.md b/opentelemetry-zipkin/CHANGELOG.md index 2e1e12261f..730f9e5aeb 100644 --- 
a/opentelemetry-zipkin/CHANGELOG.md +++ b/opentelemetry-zipkin/CHANGELOG.md @@ -9,7 +9,7 @@ ## Changed - Add defaults for timeouts to HTTP clients #718 -- Update to opentelemetry v1.0.0-beta.1 +- Update to opentelemetry v0.18.0 - Update to opentelemetry-http v0.7.0 - Update to opentelemetry-semantic-conventions v0.10.0 diff --git a/opentelemetry-zipkin/Cargo.toml b/opentelemetry-zipkin/Cargo.toml index db15b73c99..79be6ae9a7 100644 --- a/opentelemetry-zipkin/Cargo.toml +++ b/opentelemetry-zipkin/Cargo.toml @@ -27,7 +27,7 @@ surf-client = ["surf", "opentelemetry-http/surf"] [dependencies] async-trait = "0.1" -opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", features = ["trace"] } +opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] } opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true } opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } serde_json = "1.0" diff --git a/opentelemetry-zpages/CHANGELOG.md b/opentelemetry-zpages/CHANGELOG.md index 851065bc04..f7a217639c 100644 --- a/opentelemetry-zpages/CHANGELOG.md +++ b/opentelemetry-zpages/CHANGELOG.md @@ -4,7 +4,7 @@ ### Changed -- Update to opentelemetry v1.0.0-beta.1 +- Update to opentelemetry v0.18.0 ## v0.2.0 diff --git a/opentelemetry-zpages/Cargo.toml b/opentelemetry-zpages/Cargo.toml index be2c4bdcec..e5010c6a40 100644 --- a/opentelemetry-zpages/Cargo.toml +++ b/opentelemetry-zpages/Cargo.toml @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry = { version = "1.0.0-beta.1", path = "../opentelemetry", default-features = false, features = ["trace"] } +opentelemetry = { version = "0.18.0", path = "../opentelemetry", default-features = false, features = ["trace"] } opentelemetry-proto = { version = "0.1", path = "../opentelemetry-proto", features = ["with-serde", "zpages", "gen-protoc"], default-features = false } async-channel = "1.6" futures-channel = "0.3" diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 93e2c1f7d9..78a7348a60 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -1,6 +1,10 @@ # Changelog -## [v1.0.0-beta.1](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.17.0...v1.0.0-beta.1) +## [v0.18.0](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.17.0...v0.18.0) + +This release is the first beta release of the `trace` API and SDK. If no other +breaking changes are necessary, the next release will be 1.0. The `metrics` API +and SDK are still unstable. 
 
 ### Added
 
@@ -14,7 +18,7 @@
 - `meter_with_version` accepts optional parameter for `version` and `schema_url` #752
 - Unify `Event` and `Link` access patterns #757
 - move `SpanKind` display format impl to jaeger crate #758
-- make `TraceStateError` priviate #755
+- make `TraceStateError` private #755
 - rename `Span::record_exception` to `Span::record_error` #756
 - Replace `StatusCode` and `message` with `Status` #760
 - Move `TracerProvider::force_flush` to SDK #658
diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml
index d16a5d519b..67c5332b9e 100644
--- a/opentelemetry/Cargo.toml
+++ b/opentelemetry/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry"
-version = "1.0.0-beta.1"
+version = "0.18.0"
 description = "A metrics collection and distributed tracing framework"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust"
 repository = "https://github.com/open-telemetry/opentelemetry-rust"
@@ -20,8 +20,8 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-opentelemetry-api = { version = "1.0.0-beta.1", path = "../opentelemetry-api" }
-opentelemetry-sdk = { version = "1.0.0-beta.1", path = "../opentelemetry-sdk" }
+opentelemetry-api = { version = "0.18.0", path = "../opentelemetry-api" }
+opentelemetry-sdk = { version = "0.18.0", path = "../opentelemetry-sdk" }
 
 [features]
 default = ["trace"]

From 6121e124aafa4c894741693056c4430e5bb3f982 Mon Sep 17 00:00:00 2001
From: Julian Tescher
Date: Wed, 20 Apr 2022 19:13:56 -0400
Subject: [PATCH 3/6] Update changelog

---
 opentelemetry/CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md
index 78a7348a60..45ea12179a 100644
--- a/opentelemetry/CHANGELOG.md
+++ b/opentelemetry/CHANGELOG.md
@@ -10,6 +10,8 @@ and SDK are still unstable.
 
 - Pull sampling probability from `OTEL_TRACES_SAMPLER_ARG` in default sdk config #737
 - Add `schema_url` to `Tracer` #743
+- Add `schema_url` to `Resource` #775
+- Add `Span::set_attributes` #638
 
 ### Changed

From ffaf3a323ff0d5ca87bcba96f2b546c46b8fd424 Mon Sep 17 00:00:00 2001
From: Julian Tescher
Date: Sun, 3 Jul 2022 18:44:06 -0700
Subject: [PATCH 4/6] Merge branch 'main' into relase

---
 .github/workflows/ci.yml | 2 +-
 Cargo.toml | 1 +
 examples/actix-http-tracing/Cargo.toml | 14 +-
 examples/actix-http-tracing/src/main.rs | 4 +-
 examples/actix-http/Cargo.toml | 5 +-
 examples/actix-http/src/main.rs | 1 +
 examples/actix-udp/Cargo.toml | 7 +-
 examples/basic-otlp-http/README.md | 3 +-
 examples/basic-otlp-http/docker-compose.yaml | 2 +-
 examples/basic-otlp/Dockerfile | 6 +
 examples/basic-otlp/README.md | 31 +-
 examples/basic-otlp/docker-compose.yaml | 37 +
 .../basic-otlp/otel-collector-config.yaml | 35 +
 .../external-otlp-grpcio-async-std/Cargo.toml | 2 +-
 examples/external-otlp-tonic-tokio/Cargo.toml | 2 +-
 .../external-otlp-tonic-tokio/src/main.rs | 2 +-
 examples/jaeger-remote-sampler/Cargo.toml | 12 +
 examples/jaeger-remote-sampler/README.md | 47 +
 .../jaeger-remote-sampler/docker-compose.yaml | 26 +
 .../jaeger-remote-sampler/otel-collector.yaml | 17 +
 examples/jaeger-remote-sampler/src/main.rs | 41 +
 examples/jaeger-remote-sampler/strategies.json | 37 +
 opentelemetry-api/Cargo.toml | 4 +-
 opentelemetry-api/src/baggage.rs | 5 +-
 opentelemetry-api/src/common.rs | 207 +-
 opentelemetry-api/src/global/error_handler.rs | 6 +-
 opentelemetry-api/src/global/metrics.rs | 11 +-
 opentelemetry-api/src/global/propagation.rs | 14 +-
 opentelemetry-api/src/global/trace.rs | 11 +-
 opentelemetry-api/src/lib.rs | 2 +-
 opentelemetry-api/src/metrics/noop.rs | 14 +-
 opentelemetry-api/src/trace/context.rs | 15 +-
 opentelemetry-api/src/trace/mod.rs | 2 +
 opentelemetry-api/src/trace/order_map.rs | 670 ++++++
 opentelemetry-api/src/trace/tracer.rs | 27 +-
 opentelemetry-aws/Cargo.toml | 4 +-
 opentelemetry-aws/src/lib.rs | 6 +-
 opentelemetry-contrib/Cargo.toml | 12 +-
 .../src/trace/exporter/jaeger_json.rs | 299 +++
 .../src/trace/exporter/mod.rs | 13 +
 opentelemetry-contrib/src/trace/mod.rs | 1 +
 .../src/trace/propagator/mod.rs | 1 -
 opentelemetry-datadog/Cargo.toml | 8 +-
 opentelemetry-datadog/src/exporter/mod.rs | 148 +-
 .../src/exporter/model/mod.rs | 30 +-
 .../src/exporter/model/v03.rs | 16 +-
 .../src/exporter/model/v05.rs | 14 +-
 opentelemetry-datadog/src/lib.rs | 9 +-
 opentelemetry-dynatrace/Cargo.toml | 4 +-
 .../src/transform/metrics.rs | 12 +-
 opentelemetry-http/Cargo.toml | 2 +-
 opentelemetry-http/README.md | 2 +-
 opentelemetry-http/src/lib.rs | 9 +-
 opentelemetry-jaeger/Cargo.toml | 21 +-
 opentelemetry-jaeger/README.md | 2 +-
 .../src/exporter/config/agent.rs | 22 +-
 .../exporter/config/collector/http_client.rs | 6 +-
 .../src/exporter/config/collector/mod.rs | 8 +-
 .../src/exporter/config/mod.rs | 70 +-
 opentelemetry-jaeger/src/exporter/mod.rs | 122 +-
 opentelemetry-jaeger/src/lib.rs | 15 +-
 opentelemetry-otlp/Cargo.toml | 6 +-
 opentelemetry-otlp/src/exporter/http.rs | 11 +-
 opentelemetry-otlp/src/exporter/mod.rs | 65 +-
 opentelemetry-otlp/src/lib.rs | 13 +-
 opentelemetry-otlp/src/metric.rs | 28 +-
 opentelemetry-otlp/src/span.rs | 156 +-
 opentelemetry-otlp/src/transform/metrics.rs | 28 +-
 opentelemetry-otlp/src/transform/resource.rs | 8 +
 opentelemetry-prometheus/Cargo.toml | 2 +-
 opentelemetry-proto/Cargo.toml | 10 +-
 opentelemetry-proto/build.rs | 1 -
 opentelemetry-proto/src/transform/common.rs | 4 +-
 opentelemetry-proto/src/transform/traces.rs | 39 +-
 opentelemetry-sdk/Cargo.toml | 16 +-
 .../benches/batch_span_processor.rs | 4 +-
 opentelemetry-sdk/benches/trace.rs | 6 +-
 .../src/export/metrics/stdout.rs | 2 +-
 opentelemetry-sdk/src/export/trace/mod.rs | 9 +-
 opentelemetry-sdk/src/export/trace/stdout.rs | 23 +-
 .../src/metrics/controllers/push.rs | 8 +-
 opentelemetry-sdk/src/propagation/baggage.rs | 11 +-
 .../src/propagation/trace_context.rs | 9 +-
 opentelemetry-sdk/src/resource/mod.rs | 118 +-
 opentelemetry-sdk/src/resource/process.rs | 12 +-
 opentelemetry-sdk/src/runtime.rs | 10 +-
 opentelemetry-sdk/src/testing/trace.rs | 30 +-
 opentelemetry-sdk/src/trace/config.rs | 24 +-
 opentelemetry-sdk/src/trace/mod.rs | 3 +
 opentelemetry-sdk/src/trace/provider.rs | 86 +-
 opentelemetry-sdk/src/trace/sampler.rs | 116 +-
 .../src/trace/sampler/jaeger_remote/mod.rs | 10 +
 .../trace/sampler/jaeger_remote/rate_limit.rs | 108 +
 .../src/trace/sampler/jaeger_remote/remote.rs | 87 +
 .../trace/sampler/jaeger_remote/sampler.rs | 258 +++
 .../jaeger_remote/sampling_strategy.rs | 226 ++
 opentelemetry-sdk/src/trace/span.rs | 11 +-
 opentelemetry-sdk/src/trace/span_processor.rs | 301 ++-
 opentelemetry-sdk/src/trace/tracer.rs | 22 +-
 opentelemetry-semantic-conventions/Cargo.toml | 2 +-
 .../src/trace.rs | 4 +-
 opentelemetry-stackdriver/Cargo.toml | 15 +-
 .../google/logging/type/http_request.proto | 3 +-
 .../google/logging/type/log_severity.proto | 5 +-
 .../proto/google/logging/v2/log_entry.proto | 49 +-
 .../proto/google/logging/v2/logging.proto | 121 +-
 .../google/logging/v2/logging_config.proto | 794 +++++--
 opentelemetry-stackdriver/src/lib.rs | 94 +-
 .../src/proto/devtools/cloudtrace/v2.rs | 35 +-
 .../src/proto/logging/v2.rs | 1870 +++--------------
 opentelemetry-stackdriver/tests/generate.rs | 19 +-
 opentelemetry-zipkin/Cargo.toml | 7 +-
 opentelemetry-zipkin/README.md | 2 +-
 opentelemetry-zipkin/src/exporter/mod.rs | 53 +-
 .../src/exporter/model/span.rs | 4 +-
 opentelemetry-zipkin/src/exporter/uploader.rs | 9 +-
 opentelemetry-zipkin/src/lib.rs | 32 +-
 opentelemetry-zipkin/src/propagator/mod.rs | 24 +-
 opentelemetry-zpages/Cargo.toml | 1 -
 opentelemetry-zpages/src/lib.rs | 3 -
 opentelemetry-zpages/src/trace/aggregator.rs | 28 +-
 opentelemetry/README.md | 2 +-
 scripts/test.sh | 2 +-
 123 files changed, 4506 insertions(+), 2731 deletions(-)
 create mode 100644 examples/basic-otlp/Dockerfile
 create mode 100644 examples/basic-otlp/docker-compose.yaml
 create mode 100644 examples/basic-otlp/otel-collector-config.yaml
 create mode 100644 examples/jaeger-remote-sampler/Cargo.toml
 create mode 100644 examples/jaeger-remote-sampler/README.md
 create mode 100644 examples/jaeger-remote-sampler/docker-compose.yaml
 create mode 100644 examples/jaeger-remote-sampler/otel-collector.yaml
 create mode 100644 examples/jaeger-remote-sampler/src/main.rs
 create mode 100644 examples/jaeger-remote-sampler/strategies.json
 create mode 100644 opentelemetry-api/src/trace/order_map.rs
 create mode 100644 opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
 create mode 100644 opentelemetry-contrib/src/trace/exporter/mod.rs
 create mode 100644 opentelemetry-sdk/src/trace/sampler/jaeger_remote/mod.rs
 create mode 100644 opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs
 create mode 100644 opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs
 create mode 100644 opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs
 create mode 100644 opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5b5d791054..992219dc66 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -68,7 +68,7 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: 1.49.0
+          toolchain: 1.49
           override: true
       - name: Run tests
         run: cargo --version &&
diff --git a/Cargo.toml b/Cargo.toml
index b7eaef93f6..6a30d46dbd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -32,6 +32,7 @@ members = [
     "examples/http",
     "examples/hyper-prometheus",
     "examples/tracing-grpc",
+    "examples/jaeger-remote-sampler",
    "examples/zipkin",
     "examples/multiple-span-processors",
     "examples/zpages"
diff --git a/examples/actix-http-tracing/Cargo.toml b/examples/actix-http-tracing/Cargo.toml
index 5129887342..7ea1314568 100644
--- a/examples/actix-http-tracing/Cargo.toml
+++ b/examples/actix-http-tracing/Cargo.toml
@@ -5,11 +5,11 @@ edition = "2018"
 publish = false
 
 [dependencies]
-actix-web = "3.2"
-actix-web-opentelemetry = { version = "0.9", features = ["metrics"] }
-opentelemetry = { version = "0.11", features = ["metrics", "tokio"] }
-opentelemetry-jaeger = { version = "0.10", features = ["tokio"] }
-opentelemetry-prometheus = "0.4"
+actix-web = "4.1"
+actix-web-opentelemetry = { version = "0.12", features = ["metrics"] }
+opentelemetry = { version = "0.17", features = ["metrics", "tokio"] }
+opentelemetry-jaeger = { version = "0.16", features = ["tokio"] }
+opentelemetry-prometheus = "0.10"
 tracing = "0.1"
-tracing-opentelemetry = "0.10"
-tracing-subscriber = "0.2"
+tracing-opentelemetry = "0.17"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
diff --git a/examples/actix-http-tracing/src/main.rs b/examples/actix-http-tracing/src/main.rs
index 443fbb3b53..1800790101 100644
--- a/examples/actix-http-tracing/src/main.rs
+++ b/examples/actix-http-tracing/src/main.rs
@@ -30,9 +30,9 @@ async fn main() -> io::Result<()> {
     // Start an otel jaeger trace pipeline
     global::set_text_map_propagator(TraceContextPropagator::new());
 
-    let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline()
+    let tracer = opentelemetry_jaeger::new_pipeline()
         .with_service_name("app_name")
-        .install()
+        .install_simple()
         .unwrap();
 
     // Initialize `tracing` using `opentelemetry-tracing` and configure logging
diff --git a/examples/actix-http/Cargo.toml b/examples/actix-http/Cargo.toml
index 6d7aa9536d..8f36683f13 100644
--- a/examples/actix-http/Cargo.toml
+++ b/examples/actix-http/Cargo.toml
@@ -7,8 +7,7 @@ publish = false
 [dependencies]
 opentelemetry = { path = "../../opentelemetry", features = ["rt-tokio"] }
 opentelemetry-jaeger = { path = "../../opentelemetry-jaeger", features = ["reqwest_collector_client", "rt-tokio-current-thread"] }
-thrift = "0.13"
-actix-web = "4.0.0"
+actix-web = "4.1.0"
 actix-service = "2.0.0"
-env_logger = "0.8.2"
+env_logger = "0.9.0"
 tokio = { version = "1", features = ["full"] }
diff --git a/examples/actix-http/src/main.rs b/examples/actix-http/src/main.rs
index e7ee6f08e8..a4e4c5ac4b 100644
--- a/examples/actix-http/src/main.rs
+++ b/examples/actix-http/src/main.rs
@@ -13,6 +13,7 @@ fn init_tracer() -> Result {
     opentelemetry_jaeger::new_collector_pipeline()
         .with_endpoint("http://127.0.0.1:14268/api/traces")
         .with_service_name("trace-http-demo")
+        .with_reqwest()
         .install_batch(opentelemetry::runtime::TokioCurrentThread)
 }
diff --git a/examples/actix-udp/Cargo.toml b/examples/actix-udp/Cargo.toml
index a911c1be1f..a80fb405e4 100644
--- a/examples/actix-udp/Cargo.toml
+++ b/examples/actix-udp/Cargo.toml
@@ -7,7 +7,6 @@ publish = false
 [dependencies]
 opentelemetry = { path = "../../opentelemetry" }
 opentelemetry-jaeger = { path = "../../opentelemetry-jaeger" }
-thrift = "0.13"
-actix-web = "3"
-actix-service = "1"
-env_logger = "0.8.2"
+actix-web = "4.1"
+actix-service = "2"
+env_logger = "0.9"
diff --git a/examples/basic-otlp-http/README.md b/examples/basic-otlp-http/README.md
index cca9f222d3..098c2e5bf0 100644
--- a/examples/basic-otlp-http/README.md
+++ b/examples/basic-otlp-http/README.md
@@ -2,7 +2,8 @@
 * Run the application locally, to run as a docker container you have to change the relative paths from the `Cargo.toml`
 * The Collector then sends the data to the appropriate backend, in this case JAEGER
 
-This demo uses `docker-compose` and by default runs against the `otel/opentelemetry-collector-dev:latest` image.
+This demo uses `docker-compose` and by default runs against the `otel/opentelemetry-collector-dev:latest` image,
+and uses `http` as the transport.
 
 ```shell
 docker-compose up
diff --git a/examples/basic-otlp-http/docker-compose.yaml b/examples/basic-otlp-http/docker-compose.yaml
index 0cbc679348..cd81583921 100644
--- a/examples/basic-otlp-http/docker-compose.yaml
+++ b/examples/basic-otlp-http/docker-compose.yaml
@@ -18,7 +18,7 @@ services:
     ports:
       - "1888:1888" # pprof extension
       - "13133:13133" # health_check extension
-      - "4317" # OTLP gRPC receiver
+      - "4317:4317" # OTLP gRPC receiver
       - "4318:4318" # OTLP HTTP receiver
       - "55670:55679" # zpages extension
     depends_on:
diff --git a/examples/basic-otlp/Dockerfile b/examples/basic-otlp/Dockerfile
new file mode 100644
index 0000000000..b63241e283
--- /dev/null
+++ b/examples/basic-otlp/Dockerfile
@@ -0,0 +1,6 @@
+FROM rust:1.51
+COPY . /usr/src/basic-otlp/
+WORKDIR /usr/src/basic-otlp/
+RUN cargo build --release
+RUN cargo install --path .
+CMD ["/usr/local/cargo/bin/basic-otlp"]
diff --git a/examples/basic-otlp/README.md b/examples/basic-otlp/README.md
index 3c463b524a..d1b71a1228 100644
--- a/examples/basic-otlp/README.md
+++ b/examples/basic-otlp/README.md
@@ -4,6 +4,33 @@ This example shows basic span and metric usage, and exports to the [OpenTelemetr
 
 ## Usage
 
+### `docker-compose`
+
+By default runs against the `otel/opentelemetry-collector-dev:latest` image, and uses the `tonic`'s
+`grpc` example as the transport.
+
+```shell
+docker-compose up
+or
+docker-compose up -d
+```
+
+In another terminal run the application `cargo run`
+
+Use the browser to see the trace:
+- Jaeger at http://0.0.0.0:16686
+
+Tear it down:
+
+```shell
+docker-compose down
+```
+
+### Manual
+
+If you don't want to use `docker-compose`, you can manually run the `otel/opentelemetry-collector` container
+and inspect the logs to see traces being transferred.
+ ```shell # Run `opentelemetry-collector` $ docker run -p4317:4317 otel/opentelemetry-collector:latest @@ -90,4 +117,6 @@ Data point attributes: StartTimestamp: 2021-11-19 04:07:46.29555 +0000 UTC Timestamp: 2021-11-19 04:08:36.297279 +0000 UTC Value: 1.000000 -``` \ No newline at end of file +``` + + diff --git a/examples/basic-otlp/docker-compose.yaml b/examples/basic-otlp/docker-compose.yaml new file mode 100644 index 0000000000..cd81583921 --- /dev/null +++ b/examples/basic-otlp/docker-compose.yaml @@ -0,0 +1,37 @@ +version: "2" +services: + + # Jaeger + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" + - "14268" + - "14250" + + # Collector + otel-collector: + image: otel/opentelemetry-collector:latest + command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "1888:1888" # pprof extension + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP HTTP receiver + - "55670:55679" # zpages extension + depends_on: + - jaeger-all-in-one + + + # metrics-rust: + # build: + # dockerfile: $PWD/Dockerfile + # context: ./basic-otlp-http + # environment: + # - OTLP_TONIC_ENDPOINT=otel-collector:4317 + # depends_on: + # - otel-collector + + diff --git a/examples/basic-otlp/otel-collector-config.yaml b/examples/basic-otlp/otel-collector-config.yaml new file mode 100644 index 0000000000..1a822eee79 --- /dev/null +++ b/examples/basic-otlp/otel-collector-config.yaml @@ -0,0 +1,35 @@ +receivers: + otlp: + protocols: + http: + grpc: + +exporters: + logging: + loglevel: debug + + jaeger: + endpoint: jaeger-all-in-one:14250 + insecure: true + +processors: + batch: + +extensions: + health_check: + pprof: + endpoint: :1888 + zpages: + endpoint: :55679 + +service: + extensions: [pprof, zpages, health_check] + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [logging, jaeger] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [logging] diff --git a/examples/external-otlp-grpcio-async-std/Cargo.toml b/examples/external-otlp-grpcio-async-std/Cargo.toml index 4e1e262811..3b70e775e6 100644 --- a/examples/external-otlp-grpcio-async-std/Cargo.toml +++ b/examples/external-otlp-grpcio-async-std/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" publish = false [dependencies] -async-std = { version = "1.9.0", features = ["attributes"] } +async-std = { version = "= 1.10.0", features = ["attributes"] } env_logger = "0.8.2" opentelemetry = { path = "../../opentelemetry", features = ["rt-async-std"] } opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = [ diff --git a/examples/external-otlp-tonic-tokio/Cargo.toml b/examples/external-otlp-tonic-tokio/Cargo.toml index df53269280..bcd38463cb 100644 --- a/examples/external-otlp-tonic-tokio/Cargo.toml +++ b/examples/external-otlp-tonic-tokio/Cargo.toml @@ -9,5 +9,5 @@ opentelemetry = { path = "../../opentelemetry", features = ["rt-tokio", "metrics opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = ["tonic", "tls", "tls-roots"] } serde_json = "1.0" tokio = { version = "1.0", features = ["full"] } -tonic = "0.6.2" +tonic = "0.7.1" url = "2.2.0" diff --git a/examples/external-otlp-tonic-tokio/src/main.rs b/examples/external-otlp-tonic-tokio/src/main.rs index ed71df3108..28e819c6a8 100644 --- a/examples/external-otlp-tonic-tokio/src/main.rs +++ b/examples/external-otlp-tonic-tokio/src/main.rs @@ -62,7 +62,7 @@ fn init_tracer() 
-> Result { opentelemetry_otlp::new_exporter() .tonic() .with_endpoint(endpoint.as_str()) - .with_metadata(dbg!(metadata)) + .with_metadata(metadata) .with_tls_config( ClientTlsConfig::new().domain_name( endpoint diff --git a/examples/jaeger-remote-sampler/Cargo.toml b/examples/jaeger-remote-sampler/Cargo.toml new file mode 100644 index 0000000000..9141fa08dd --- /dev/null +++ b/examples/jaeger-remote-sampler/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "jaeger-remote-sampler" +version = "0.1.0" +edition = "2018" + +[dependencies] +opentelemetry-sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio", "jaeger_remote_sampler"] } +opentelemetry-api = { path = "../../opentelemetry-api" } +opentelemetry-http = { path = "../../opentelemetry-http", features = ["reqwest"] } +reqwest = "0.11.10" +tokio = { version = "1.18", features = ["macros", "rt-multi-thread"] } + diff --git a/examples/jaeger-remote-sampler/README.md b/examples/jaeger-remote-sampler/README.md new file mode 100644 index 0000000000..4f9dab603a --- /dev/null +++ b/examples/jaeger-remote-sampler/README.md @@ -0,0 +1,47 @@ +# Jaeger remote sampler + +When services generate too many spans, we need to sample some of them to save cost and speed up queries. + +Adaptive sampling works in the Jaeger collector by observing the spans received from services and recalculating sampling +probabilities for each service/endpoint combination to ensure that the volume is relatively constant. + +## Setup + +Start a Jaeger collector and an OpenTelemetry collector locally using docker: + +``` +docker-compose up -d +``` + +It will allow you to: + +- query sampling strategies from the Jaeger collector at port 5778: `http://localhost:5778/sampling?service=foo` +- query sampling strategies from the OpenTelemetry collector at port 5779: `http://localhost:5779/sampling?service=foo` + +## Run the example + +After the Jaeger remote sampling servers start successfully, run + +`cargo run` + +to start the example. You should see that only one span is printed. + +Looking at the example, you will notice we use `AlwaysOff` as our default sampler. This means that before the SDK gets the sampling strategy from the remote server, no span will be sampled. + +Once the SDK has fetched the remote strategy, a probabilistic sampler starts internally. In this case, the probability is set to 1.0 for all spans of the `foo` service. This is defined by + +``` +"service": "foo", +"type": "probabilistic", +"param": 1, +``` + +Feel free to tune `param` and watch the sampling probability change. + +## Strategies + +The sampling strategies are defined in the `strategies.json` file, which defines two sets of strategies. + +The first strategy is returned for the `foo` service. The second is the catch-all default strategy for all other +services.
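To make the `probabilistic` strategy concrete: a sampler configured with `param = p` keeps roughly a fraction `p` of traces, typically by comparing the trace ID against a threshold so the decision is deterministic per trace. A minimal sketch of that idea (illustrative only, not part of this patch; the SDK's `jaeger_remote` sampler implements the real logic internally):

```rust
/// Deterministic probabilistic decision: compare the low 64 bits of the
/// trace id against `param` scaled to the u64 range.
fn should_sample(trace_id_low: u64, param: f64) -> bool {
    // param = 1.0 keeps everything, param = 0.0 drops everything.
    (trace_id_low as f64) < param * u64::MAX as f64
}

fn main() {
    assert!(should_sample(42, 1.0)); // param 1.0: always sampled
    assert!(!should_sample(42, 0.0)); // param 0.0: never sampled
}
```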
+ diff --git a/examples/jaeger-remote-sampler/docker-compose.yaml b/examples/jaeger-remote-sampler/docker-compose.yaml new file mode 100644 index 0000000000..8f41f07319 --- /dev/null +++ b/examples/jaeger-remote-sampler/docker-compose.yaml @@ -0,0 +1,26 @@ +version: "3" +services: + + # jaeger collector + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" + - "14268" + - "14250" + - "5778:5778" + container_name: jaeger-collector + volumes: + - ./strategies.json:/etc/jaeger/custom_strategies.json + environment: + - SAMPLING_STRATEGIES_FILE=/etc/jaeger/custom_strategies.json + + # opentelemetry collector + otel-collector: + image: otel/opentelemetry-collector:latest + command: [ "--config=/etc/otel-collector.yaml" ] + volumes: + - ./otel-collector.yaml:/etc/otel-collector.yaml + - ./strategies.json:/etc/strategies.json + ports: + - "5779:5778" # default jaeger remote sampling port \ No newline at end of file diff --git a/examples/jaeger-remote-sampler/otel-collector.yaml b/examples/jaeger-remote-sampler/otel-collector.yaml new file mode 100644 index 0000000000..e5f1280a2e --- /dev/null +++ b/examples/jaeger-remote-sampler/otel-collector.yaml @@ -0,0 +1,17 @@ +receivers: + jaeger: + protocols: + grpc: + remote_sampling: + host_endpoint: "0.0.0.0:5778" # default port + insecure: true + strategy_file: "/etc/strategies.json" + +exporters: + logging: + +service: + pipelines: + traces: + receivers: [ jaeger ] + exporters: [ logging ] \ No newline at end of file diff --git a/examples/jaeger-remote-sampler/src/main.rs b/examples/jaeger-remote-sampler/src/main.rs new file mode 100644 index 0000000000..37c3d748b2 --- /dev/null +++ b/examples/jaeger-remote-sampler/src/main.rs @@ -0,0 +1,41 @@ +use opentelemetry_api::global; +use opentelemetry_api::trace::Tracer; +use opentelemetry_sdk::export::trace::stdout::Exporter as StdoutExporter; +use opentelemetry_sdk::runtime; +use opentelemetry_sdk::trace::{Sampler, TracerProvider as SdkTracerProvider}; +use std::time::Duration; + +fn setup() { + let client = reqwest::Client::new(); + + let sampler = Sampler::jaeger_remote(runtime::Tokio, client, Sampler::AlwaysOff, "foo") + .with_endpoint("http://localhost:5778/sampling") // setup jaeger remote sampler endpoint + .with_update_interval(Duration::from_secs(5)) // will call jaeger sampling endpoint every 5 secs. 
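+        // Note: until the first strategy document is fetched from the endpoint
+        // above, the `AlwaysOff` default applies and early spans are dropped.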
+ .build() + .unwrap(); + + let config = opentelemetry_sdk::trace::config().with_sampler(sampler); + + let provider = SdkTracerProvider::builder() + .with_config(config) + .with_simple_exporter(StdoutExporter::new(std::io::stdout(), true)) + .build(); + + global::set_tracer_provider(provider); +} + +#[tokio::main] +async fn main() { + setup(); + let tracer = global::tracer("test"); + + { + let _not_sampled_span = tracer.start("test"); + } + + tokio::time::sleep(Duration::from_secs(10)).await; + + { + let _sampled_span = tracer.start("should_record"); + } +} diff --git a/examples/jaeger-remote-sampler/strategies.json b/examples/jaeger-remote-sampler/strategies.json new file mode 100644 index 0000000000..3ba614e48e --- /dev/null +++ b/examples/jaeger-remote-sampler/strategies.json @@ -0,0 +1,37 @@ +{ + "service_strategies": [ + { + "service": "foo", + "type": "probabilistic", + "param": 1, + "operation_strategies": [ + { + "operation": "op1", + "type": "probabilistic", + "param": 0.2 + }, + { + "operation": "op2", + "type": "probabilistic", + "param": 0.4 + } + ] + } + ], + "default_strategy": { + "type": "probabilistic", + "param": 0.5, + "operation_strategies": [ + { + "operation": "/health", + "type": "probabilistic", + "param": 0.0 + }, + { + "operation": "/metrics", + "type": "probabilistic", + "param": 0.0 + } + ] + } +} diff --git a/opentelemetry-api/Cargo.toml b/opentelemetry-api/Cargo.toml index 93fd875294..9f882ddd07 100644 --- a/opentelemetry-api/Cargo.toml +++ b/opentelemetry-api/Cargo.toml @@ -1,13 +1,15 @@ [package] name = "opentelemetry-api" version = "0.18.0" +license = "Apache-2.0" edition = "2018" [dependencies] fnv = { version = "1.0", optional = true } futures-channel = "0.3" futures-util = { version = "0.3", default-features = false, features = ["std", "sink"] } -lazy_static = "1.4" +indexmap = "=1.8" +once_cell = "1.12.0" pin-project = { version = "1.0.2", optional = true } thiserror = "1" tokio-stream = { version = "0.1", optional = true } diff --git a/opentelemetry-api/src/baggage.rs b/opentelemetry-api/src/baggage.rs index 4bd1045efe..21ff618b6e 100644 --- a/opentelemetry-api/src/baggage.rs +++ b/opentelemetry-api/src/baggage.rs @@ -15,12 +15,11 @@ //! //! [W3C Baggage]: https://w3c.github.io/baggage use crate::{Context, Key, KeyValue, Value}; +use once_cell::sync::Lazy; use std::collections::{hash_map, HashMap}; use std::iter::FromIterator; -lazy_static::lazy_static! { - static ref DEFAULT_BAGGAGE: Baggage = Baggage::default(); -} +static DEFAULT_BAGGAGE: Lazy<Baggage> = Lazy::new(Baggage::default); const MAX_KEY_VALUE_PAIRS: usize = 180; const MAX_BYTES_FOR_ONE_PAIR: usize = 4096; diff --git a/opentelemetry-api/src/common.rs b/opentelemetry-api/src/common.rs index d1b77c7894..44eae5cf1a 100644 --- a/opentelemetry-api/src/common.rs +++ b/opentelemetry-api/src/common.rs @@ -1,23 +1,35 @@ use std::borrow::Cow; -use std::fmt; +use std::sync::Arc; +use std::{fmt, hash}; /// The key part of attribute [KeyValue] pairs. /// /// See the [attribute naming] spec for guidelines. /// /// [attribute naming]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/common/attribute-naming.md -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct Key(Cow<'static, str>); +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct Key(OtelString); impl Key { /// Create a new `Key`.
- pub fn new<S: Into<Cow<'static, str>>>(value: S) -> Self { - Key(value.into()) + /// + /// # Examples + /// + /// ``` + /// use opentelemetry_api::Key; + /// use std::sync::Arc; + /// + /// let key1 = Key::new("my_static_str"); + /// let key2 = Key::new(String::from("my_owned_string")); + /// let key3 = Key::new(Arc::from("my_ref_counted_str")); + /// ``` + pub fn new(value: impl Into<Key>) -> Self { + value.into() } /// Create a new const `Key`. pub const fn from_static_str(value: &'static str) -> Self { - Key(Cow::Borrowed(value)) + Key(OtelString::Static(value)) } /// Create a `KeyValue` pair for `bool` values. @@ -44,8 +56,8 @@ impl Key { } } - /// Create a `KeyValue` pair for `String` values. - pub fn string<T: Into<Cow<'static, str>>>(self, value: T) -> KeyValue { + /// Create a `KeyValue` pair for string-like values. + pub fn string(self, value: impl Into<StringValue>) -> KeyValue { KeyValue { key: self, value: Value::String(value.into()), @@ -62,34 +74,95 @@ impl Key { /// Returns a reference to the underlying key name pub fn as_str(&self) -> &str { - self.0.as_ref() + self.0.as_str() } } impl From<&'static str> for Key { /// Convert a `&str` to a `Key`. fn from(key_str: &'static str) -> Self { - Key(Cow::from(key_str)) + Key(OtelString::Static(key_str)) } } impl From<String> for Key { /// Convert a `String` to a `Key`. fn from(string: String) -> Self { - Key(Cow::from(string)) + Key(OtelString::Owned(string)) + } +} + +impl From<Arc<str>> for Key { + /// Convert an `Arc<str>` to a `Key`. + fn from(string: Arc<str>) -> Self { + Key(OtelString::RefCounted(string)) + } +} + +impl fmt::Debug for Key { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(fmt) } } impl From<Key> for String { - /// Converts `Key` instances into `String`. fn from(key: Key) -> Self { - key.0.into_owned() + match key.0 { + OtelString::Owned(s) => s, + OtelString::Static(s) => s.to_string(), + OtelString::RefCounted(s) => s.to_string(), + } } } impl fmt::Display for Key { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(fmt) + match &self.0 { + OtelString::Owned(s) => s.fmt(fmt), + OtelString::Static(s) => s.fmt(fmt), + OtelString::RefCounted(s) => s.fmt(fmt), + } + } +} + +#[derive(Clone, Debug, Eq)] +enum OtelString { + Static(&'static str), + Owned(String), + RefCounted(Arc<str>), +} + +impl OtelString { + fn as_str(&self) -> &str { + match self { + OtelString::Owned(s) => s.as_ref(), + OtelString::Static(s) => s, + OtelString::RefCounted(s) => s.as_ref(), + } + } +} + +impl PartialOrd for OtelString { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + self.as_str().partial_cmp(other.as_str()) + } } + +impl Ord for OtelString { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.as_str().cmp(other.as_str()) + } +} + +impl PartialEq for OtelString { + fn eq(&self, other: &Self) -> bool { + self.as_str().eq(other.as_str()) + } +} + +impl hash::Hash for OtelString { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.as_str().hash(state) } } @@ -103,7 +176,7 @@ pub enum Array { /// Array of floats F64(Vec<f64>), /// Array of strings - String(Vec<Cow<'static, str>>), + String(Vec<StringValue>), } impl fmt::Display for Array { @@ -118,7 +191,7 @@ if i > 0 { write!(fmt, ",")?; } - write!(fmt, "{:?}", t)?; + write!(fmt, "\"{}\"", t)?; } write!(fmt, "]") } @@ -153,7 +226,7 @@ into_array!( (Vec<bool>, Array::Bool), (Vec<i64>, Array::I64), (Vec<f64>, Array::F64), - (Vec<Cow<'static, str>>, Array::String), + (Vec<StringValue>, Array::String), ); /// The value part of attribute [KeyValue] pairs.
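The practical upshot of the `OtelString` refactor above is that keys built from any of the three storage variants hash and compare by their string contents, so they are interchangeable at lookup time. A small sketch (assuming the `opentelemetry-api` crate as modified by this patch):

```rust
use opentelemetry_api::Key;
use std::sync::Arc;

fn main() {
    // Three storage variants, one logical key.
    let a = Key::from_static_str("service.name"); // OtelString::Static
    let b = Key::new(String::from("service.name")); // OtelString::Owned
    let c = Key::new(Arc::<str>::from("service.name")); // OtelString::RefCounted
    assert_eq!(a, b);
    assert_eq!(b, c);
}
```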
@@ -166,11 +239,81 @@ pub enum Value { /// f64 values F64(f64), /// String values - String(Cow<'static, str>), + String(StringValue), /// Array of homogeneous values Array(Array), } +/// Wrapper for string-like values +#[derive(Clone, PartialEq, Hash)] +pub struct StringValue(OtelString); + +impl fmt::Debug for StringValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Display for StringValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + OtelString::Owned(s) => s.fmt(f), + OtelString::Static(s) => s.fmt(f), + OtelString::RefCounted(s) => s.fmt(f), + } + } +} + +impl AsRef<str> for StringValue { + fn as_ref(&self) -> &str { + self.0.as_str() + } +} + +impl StringValue { + /// Returns a string slice to this value + pub fn as_str(&self) -> &str { + self.0.as_str() + } +} + +impl From<StringValue> for String { + fn from(s: StringValue) -> Self { + match s.0 { + OtelString::Owned(s) => s, + OtelString::Static(s) => s.to_string(), + OtelString::RefCounted(s) => s.to_string(), + } + } +} + +impl From<&'static str> for StringValue { + fn from(s: &'static str) -> Self { + StringValue(OtelString::Static(s)) + } +} + +impl From<String> for StringValue { + fn from(s: String) -> Self { + StringValue(OtelString::Owned(s)) + } +} + +impl From<Arc<str>> for StringValue { + fn from(s: Arc<str>) -> Self { + StringValue(OtelString::RefCounted(s)) + } +} + +impl From<Cow<'static, str>> for StringValue { + fn from(s: Cow<'static, str>) -> Self { + match s { + Cow::Owned(s) => StringValue(OtelString::Owned(s)), + Cow::Borrowed(s) => StringValue(OtelString::Static(s)), + } + } +} + impl Value { /// String representation of the `Value` /// @@ -180,7 +323,7 @@ impl Value { Value::Bool(v) => format!("{}", v).into(), Value::I64(v) => format!("{}", v).into(), Value::F64(v) => format!("{}", v).into(), - Value::String(v) => Cow::Borrowed(v.as_ref()), + Value::String(v) => Cow::Borrowed(v.as_str()), Value::Array(v) => format!("{}", v).into(), } } @@ -206,31 +349,41 @@ from_values!( (bool, Value::Bool); (i64, Value::I64); (f64, Value::F64); - (Cow<'static, str>, Value::String); + (StringValue, Value::String); ); impl From<&'static str> for Value { - /// Convenience method for creating a `Value` from a `&'static str`. fn from(s: &'static str) -> Self { Value::String(s.into()) } } impl From<String> for Value { - /// Convenience method for creating a `Value` from a `String`.
fn from(s: String) -> Self { Value::String(s.into()) } } +impl From> for Value { + fn from(s: Arc) -> Self { + Value::String(s.into()) + } +} + +impl From> for Value { + fn from(s: Cow<'static, str>) -> Self { + Value::String(s.into()) + } +} + impl fmt::Display for Value { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Value::Bool(v) => fmt.write_fmt(format_args!("{}", v)), - Value::I64(v) => fmt.write_fmt(format_args!("{}", v)), - Value::F64(v) => fmt.write_fmt(format_args!("{}", v)), - Value::String(v) => fmt.write_fmt(format_args!("{}", v)), - Value::Array(v) => fmt.write_fmt(format_args!("{}", v)), + Value::Bool(v) => v.fmt(fmt), + Value::I64(v) => v.fmt(fmt), + Value::F64(v) => v.fmt(fmt), + Value::String(v) => fmt.write_str(v.as_str()), + Value::Array(v) => v.fmt(fmt), } } } diff --git a/opentelemetry-api/src/global/error_handler.rs b/opentelemetry-api/src/global/error_handler.rs index d2c8f91ca8..5ff8783eea 100644 --- a/opentelemetry-api/src/global/error_handler.rs +++ b/opentelemetry-api/src/global/error_handler.rs @@ -5,11 +5,9 @@ use std::sync::RwLock; use crate::metrics::MetricsError; #[cfg(feature = "trace")] use crate::trace::TraceError; +use once_cell::sync::Lazy; -lazy_static::lazy_static! { - /// The global error handler. - static ref GLOBAL_ERROR_HANDLER: RwLock> = RwLock::new(None); -} +static GLOBAL_ERROR_HANDLER: Lazy>> = Lazy::new(|| RwLock::new(None)); /// Wrapper for error from both tracing and metrics part of open telemetry. #[derive(thiserror::Error, Debug)] diff --git a/opentelemetry-api/src/global/metrics.rs b/opentelemetry-api/src/global/metrics.rs index 0761a1332e..35d1325333 100644 --- a/opentelemetry-api/src/global/metrics.rs +++ b/opentelemetry-api/src/global/metrics.rs @@ -1,10 +1,13 @@ use crate::metrics::{self, Meter, MeterProvider}; +use once_cell::sync::Lazy; use std::sync::{Arc, RwLock}; -lazy_static::lazy_static! { - /// The global `Meter` provider singleton. - static ref GLOBAL_METER_PROVIDER: RwLock = RwLock::new(GlobalMeterProvider::new(metrics::noop::NoopMeterProvider::new())); -} +/// The global `Meter` provider singleton. +static GLOBAL_METER_PROVIDER: Lazy> = Lazy::new(|| { + RwLock::new(GlobalMeterProvider::new( + metrics::noop::NoopMeterProvider::new(), + )) +}); /// Represents the globally configured [`MeterProvider`] instance for this /// application. diff --git a/opentelemetry-api/src/global/propagation.rs b/opentelemetry-api/src/global/propagation.rs index 76ecdd268f..30d5b8f86b 100644 --- a/opentelemetry-api/src/global/propagation.rs +++ b/opentelemetry-api/src/global/propagation.rs @@ -1,13 +1,15 @@ use crate::propagation::TextMapPropagator; use crate::trace::noop::NoopTextMapPropagator; +use once_cell::sync::Lazy; use std::sync::RwLock; -lazy_static::lazy_static! { - /// The current global `TextMapPropagator` propagator. - static ref GLOBAL_TEXT_MAP_PROPAGATOR: RwLock> = RwLock::new(Box::new(NoopTextMapPropagator::new())); - /// The global default `TextMapPropagator` propagator. - static ref DEFAULT_TEXT_MAP_PROPAGATOR: NoopTextMapPropagator = NoopTextMapPropagator::new(); -} +/// The current global `TextMapPropagator` propagator. +static GLOBAL_TEXT_MAP_PROPAGATOR: Lazy>> = + Lazy::new(|| RwLock::new(Box::new(NoopTextMapPropagator::new()))); + +/// The global default `TextMapPropagator` propagator. +static DEFAULT_TEXT_MAP_PROPAGATOR: Lazy = + Lazy::new(NoopTextMapPropagator::new); /// Sets the given [`TextMapPropagator`] propagator as the current global propagator. 
pub fn set_text_map_propagator(propagator: P) { diff --git a/opentelemetry-api/src/global/trace.rs b/opentelemetry-api/src/global/trace.rs index 27dae44178..a55e0ff8dd 100644 --- a/opentelemetry-api/src/global/trace.rs +++ b/opentelemetry-api/src/global/trace.rs @@ -1,5 +1,6 @@ use crate::trace::{noop::NoopTracerProvider, SpanContext, Status}; use crate::{trace, trace::TracerProvider, Context, KeyValue}; +use once_cell::sync::Lazy; use std::borrow::Cow; use std::fmt; use std::mem; @@ -352,10 +353,12 @@ impl trace::TracerProvider for GlobalTracerProvider { } } -lazy_static::lazy_static! { - /// The global `Tracer` provider singleton. - static ref GLOBAL_TRACER_PROVIDER: RwLock = RwLock::new(GlobalTracerProvider::new(trace::noop::NoopTracerProvider::new())); -} +/// The global `Tracer` provider singleton. +static GLOBAL_TRACER_PROVIDER: Lazy> = Lazy::new(|| { + RwLock::new(GlobalTracerProvider::new( + trace::noop::NoopTracerProvider::new(), + )) +}); /// Returns an instance of the currently configured global [`TracerProvider`] through /// [`GlobalTracerProvider`]. diff --git a/opentelemetry-api/src/lib.rs b/opentelemetry-api/src/lib.rs index a736ab7896..9d2a56ce0b 100644 --- a/opentelemetry-api/src/lib.rs +++ b/opentelemetry-api/src/lib.rs @@ -55,7 +55,7 @@ mod common; #[doc(hidden)] pub mod testing; -pub use common::{Array, ExportError, InstrumentationLibrary, Key, KeyValue, Value}; +pub use common::{Array, ExportError, InstrumentationLibrary, Key, KeyValue, StringValue, Value}; #[cfg(feature = "metrics")] #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] diff --git a/opentelemetry-api/src/metrics/noop.rs b/opentelemetry-api/src/metrics/noop.rs index 14aa71310a..3e2a1dd64b 100644 --- a/opentelemetry-api/src/metrics/noop.rs +++ b/opentelemetry-api/src/metrics/noop.rs @@ -14,12 +14,20 @@ use crate::{ }, Context, KeyValue, }; +use once_cell::sync::Lazy; use std::any::Any; use std::sync::Arc; -lazy_static::lazy_static! { - static ref NOOP_DESCRIPTOR: Descriptor = Descriptor::new(String::new(), "noop", None, None, InstrumentKind::Counter, NumberKind::U64); -} +static NOOP_DESCRIPTOR: Lazy = Lazy::new(|| { + Descriptor::new( + String::new(), + "noop", + None, + None, + InstrumentKind::Counter, + NumberKind::U64, + ) +}); /// A no-op instance of a `MetricProvider` #[derive(Debug, Default)] diff --git a/opentelemetry-api/src/trace/context.rs b/opentelemetry-api/src/trace/context.rs index e349e8117f..9d8f047f21 100644 --- a/opentelemetry-api/src/trace/context.rs +++ b/opentelemetry-api/src/trace/context.rs @@ -5,21 +5,20 @@ use crate::{ Context, ContextGuard, KeyValue, }; use futures_util::{sink::Sink, stream::Stream}; +use once_cell::sync::Lazy; use pin_project::pin_project; -use std::error::Error; -use std::sync::Mutex; use std::{ borrow::Cow, + error::Error, pin::Pin, + sync::Mutex, task::{Context as TaskContext, Poll}, }; -lazy_static::lazy_static! { - static ref NOOP_SPAN: SynchronizedSpan = SynchronizedSpan { - span_context: SpanContext::empty_context(), - inner: None, - }; -} +static NOOP_SPAN: Lazy = Lazy::new(|| SynchronizedSpan { + span_context: SpanContext::empty_context(), + inner: None, +}); /// A reference to the currently active span in this context. 
#[derive(Debug)] diff --git a/opentelemetry-api/src/trace/mod.rs b/opentelemetry-api/src/trace/mod.rs index ef377fa5ee..a2825df241 100644 --- a/opentelemetry-api/src/trace/mod.rs +++ b/opentelemetry-api/src/trace/mod.rs @@ -168,6 +168,7 @@ use thiserror::Error; mod context; pub mod noop; +mod order_map; mod span; mod span_context; mod tracer; @@ -175,6 +176,7 @@ mod tracer_provider; pub use self::{ context::{get_active_span, mark_span_as_active, FutureExt, SpanRef, TraceContextExt}, + order_map::OrderMap, span::{Span, SpanKind, Status}, span_context::{SpanContext, SpanId, TraceFlags, TraceId, TraceState}, tracer::{SamplingDecision, SamplingResult, SpanBuilder, Tracer}, diff --git a/opentelemetry-api/src/trace/order_map.rs b/opentelemetry-api/src/trace/order_map.rs new file mode 100644 index 0000000000..b69cc7090d --- /dev/null +++ b/opentelemetry-api/src/trace/order_map.rs @@ -0,0 +1,670 @@ +use crate::{Key, KeyValue, Value}; +use indexmap::map::{ + Drain, Entry, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, ValuesMut, +}; +use indexmap::{Equivalent, IndexMap}; +use std::collections::hash_map::RandomState; +use std::hash::{BuildHasher, Hash}; +use std::iter::FromIterator; +use std::ops::{Index, IndexMut, RangeBounds}; + +/// A hash table implementation that preserves insertion order across all operations. +/// +/// Entries will be returned according to their insertion order when iterating over the collection. +#[derive(Clone, Debug)] +pub struct OrderMap(IndexMap); + +impl OrderMap { + /// Create a new map. (Does not allocate) + #[inline] + pub fn new() -> Self { + Self(IndexMap::new()) + } + + /// Create a new map with capacity for `n` key-value pairs. (Does not + /// allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + #[inline] + pub fn with_capacity(n: usize) -> Self { + Self(IndexMap::with_capacity(n)) + } +} + +impl OrderMap { + /// Create a new map with capacity for `n` key-value pairs. (Does not + /// allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + #[inline] + pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { + Self(IndexMap::with_capacity_and_hasher(n, hash_builder)) + } + + /// Create a new map with `hash_builder`. + /// + /// This function is `const`, so it + /// can be called in `static` contexts. + pub const fn with_hasher(hash_builder: S) -> Self { + Self(IndexMap::with_hasher(hash_builder)) + } + + /// Computes in **O(1)** time. + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// Return a reference to the map's `BuildHasher`. + pub fn hasher(&self) -> &S { + self.0.hasher() + } + + /// Return the number of key-value pairs in the map. + /// + /// Computes in **O(1)** time. + #[inline] + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns true if the map contains no elements. + /// + /// Computes in **O(1)** time. 
+ #[inline] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Return an iterator over the key-value pairs of the map, in their order + pub fn iter(&self) -> Iter<'_, K, V> { + self.0.iter() + } + + /// Return an iterator over the key-value pairs of the map, in their order + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + self.0.iter_mut() + } + + /// Return an iterator over the keys of the map, in their order + pub fn keys(&self) -> Keys<'_, K, V> { + self.0.keys() + } + + /// Return an owning iterator over the keys of the map, in their order + pub fn into_keys(self) -> IntoKeys { + self.0.into_keys() + } + + /// Return an iterator over the values of the map, in their order + pub fn values(&self) -> Values<'_, K, V> { + self.0.values() + } + + /// Return an iterator over mutable references to the values of the map, + /// in their order + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + self.0.values_mut() + } + + /// Return an owning iterator over the values of the map, in their order + pub fn into_values(self) -> IntoValues { + self.0.into_values() + } + + /// Remove all key-value pairs in the map, while preserving its capacity. + /// + /// Computes in **O(n)** time. + pub fn clear(&mut self) { + self.0.clear(); + } + + /// Shortens the map, keeping the first `len` elements and dropping the rest. + /// + /// If `len` is greater than the map's current length, this has no effect. + pub fn truncate(&mut self, len: usize) { + self.0.truncate(len); + } + + /// Clears the `IndexMap` in the given index range, returning those + /// key-value pairs as a drain iterator. + /// + /// The range may be any type that implements `RangeBounds`, + /// including all of the `std::ops::Range*` types, or even a tuple pair of + /// `Bound` start and end values. To drain the map entirely, use `RangeFull` + /// like `map.drain(..)`. + /// + /// This shifts down all entries following the drained range to fill the + /// gap, and keeps the allocated memory for reuse. + /// + /// ***Panics*** if the starting point is greater than the end point or if + /// the end point is greater than the length of the map. + pub fn drain(&mut self, range: R) -> Drain<'_, K, V> + where + R: RangeBounds, + { + self.0.drain(range) + } + + /// Splits the collection into two at the given index. + /// + /// Returns a newly allocated map containing the elements in the range + /// `[at, len)`. After the call, the original map will be left containing + /// the elements `[0, at)` with its previous capacity unchanged. + /// + /// ***Panics*** if `at > len`. + pub fn split_off(&mut self, at: usize) -> Self + where + S: Clone, + { + Self(self.0.split_off(at)) + } +} + +impl OrderMap +where + K: Hash + Eq, + S: BuildHasher, +{ + /// Reserve capacity for `additional` more key-value pairs. + /// + /// Computes in **O(n)** time. + pub fn reserve(&mut self, additional: usize) { + self.0.reserve(additional) + } + + /// Shrink the capacity of the map as much as possible. + /// + /// Computes in **O(n)** time. + pub fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit() + } + + /// Insert a key-value pair in the map. + /// + /// If an equivalent key already exists in the map: the key remains and + /// retains in its place in the order, its corresponding value is updated + /// with `value` and the older value is returned inside `Some(_)`. + /// + /// If no equivalent key existed in the map: the new key-value pair is + /// inserted, last in order, and `None` is returned. 
+ /// + /// Computes in **O(1)** time (amortized average). + /// + /// See also [`entry`](#method.entry) if you you want to insert *or* modify + /// or if you need to get the index of the corresponding key-value pair. + pub fn insert(&mut self, key: K, value: V) -> Option { + self.0.insert(key, value) + } + + /// Insert a key-value pair in the map, and get their index. + /// + /// If an equivalent key already exists in the map: the key remains and + /// retains in its place in the order, its corresponding value is updated + /// with `value` and the older value is returned inside `(index, Some(_))`. + /// + /// If no equivalent key existed in the map: the new key-value pair is + /// inserted, last in order, and `(index, None)` is returned. + /// + /// Computes in **O(1)** time (amortized average). + /// + /// See also [`entry`](#method.entry) if you you want to insert *or* modify + /// or if you need to get the index of the corresponding key-value pair. + pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) { + self.0.insert_full(key, value) + } + + /// Get the given key’s corresponding entry in the map for insertion and/or + /// in-place manipulation. + /// + /// Computes in **O(1)** time (amortized average). + pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { + self.0.entry(key) + } + + /// Return `true` if an equivalent to `key` exists in the map. + /// + /// Computes in **O(1)** time (average). + pub fn contains_key(&self, key: &Q) -> bool + where + Q: Hash + Equivalent, + { + self.0.contains_key(key) + } + + /// Return a reference to the value stored for `key`, if it is present, + /// else `None`. + /// + /// Computes in **O(1)** time (average). + pub fn get(&self, key: &Q) -> Option<&V> + where + Q: Hash + Equivalent, + { + self.0.get(key) + } + + /// Return references to the key-value pair stored for `key`, + /// if it is present, else `None`. + /// + /// Computes in **O(1)** time (average). + pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> + where + Q: Hash + Equivalent, + { + self.0.get_key_value(key) + } + + /// Return item index, key and value + pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> + where + Q: Hash + Equivalent, + { + self.0.get_full(key) + } + + /// Return item index, if it exists in the map + /// + /// Computes in **O(1)** time (average). + pub fn get_index_of(&self, key: &Q) -> Option + where + Q: Hash + Equivalent, + { + self.0.get_index_of(key) + } + + /// Return a mutable reference to the element pointed at by `key`, if it exists. + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where + Q: Hash + Equivalent, + { + self.0.get_mut(key) + } + + /// Return a mutable reference to the element pointed at by `key`, if it exists. + /// It also returns the element's index and its key. + pub fn get_full_mut(&mut self, key: &Q) -> Option<(usize, &K, &mut V)> + where + Q: Hash + Equivalent, + { + self.0.get_full_mut(key) + } + + /// Remove the key-value pair equivalent to `key` and return + /// its value. + /// + /// Like `Vec::remove`, the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove(&mut self, key: &Q) -> Option + where + Q: Hash + Equivalent, + { + self.0.shift_remove(key) + } + + /// Remove and return the key-value pair equivalent to `key`. 
+ /// + /// Like `Vec::remove`, the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + Q: Hash + Equivalent, + { + self.0.shift_remove_entry(key) + } + + /// Remove the key-value pair equivalent to `key` and return it and + /// the index it had. + /// + /// Like `Vec::remove`, the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> + where + Q: Hash + Equivalent, + { + self.0.shift_remove_full(key) + } + + /// Remove the last key-value pair + /// + /// This preserves the order of the remaining elements. + /// + /// Computes in **O(1)** time (average). + pub fn pop(&mut self) -> Option<(K, V)> { + self.0.pop() + } + + /// Scan through each key-value pair in the map and keep those where the + /// closure `keep` returns `true`. + /// + /// The elements are visited in order, and remaining elements keep their + /// order. + /// + /// Computes in **O(n)** time (average). + pub fn retain(&mut self, keep: F) + where + F: FnMut(&K, &mut V) -> bool, + { + self.0.retain(keep); + } +} + +impl OrderMap { + /// Get a key-value pair by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Computes in **O(1)** time. + pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { + self.0.get_index(index) + } + + /// Get a key-value pair by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Computes in **O(1)** time. + pub fn get_index_mut(&mut self, index: usize) -> Option<(&mut K, &mut V)> { + self.0.get_index_mut(index) + } + + /// Get the first key-value pair + /// + /// Computes in **O(1)** time. + pub fn first(&self) -> Option<(&K, &V)> { + self.0.first() + } + + /// Get the first key-value pair, with mutable access to the value + /// + /// Computes in **O(1)** time. + pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { + self.0.first_mut() + } + + /// Get the last key-value pair + /// + /// Computes in **O(1)** time. + pub fn last(&self) -> Option<(&K, &V)> { + self.0.last() + } + + /// Get the last key-value pair, with mutable access to the value + /// + /// Computes in **O(1)** time. + pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { + self.0.last_mut() + } + + /// Remove the key-value pair by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Like `Vec::remove`, the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). 
+ pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { + self.0.shift_remove_index(index) + } +} + +impl<'a, K, V, S> IntoIterator for &'a OrderMap { + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut OrderMap { + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl IntoIterator for OrderMap { + type Item = (K, V); + type IntoIter = IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +/// Access `OrderMap` values corresponding to a key. +/// +/// Panics if the value is missing. +impl Index<&Q> for OrderMap +where + Q: Hash + Equivalent, + K: Hash + Eq, + S: BuildHasher, +{ + type Output = V; + + /// Returns a reference to the value corresponding to the supplied `key`. + /// + /// ***Panics*** if `key` is not present in the map. + fn index(&self, key: &Q) -> &V { + self.0.index(key) + } +} + +/// Access `Ordermap` values corresponding to a key. +/// +/// Mutable indexing allows changing / updating values of key-value +/// pairs that are already present. +/// +/// You can **not** insert new pairs with index syntax, use `.insert()`. +impl IndexMut<&Q> for OrderMap +where + Q: Hash + Equivalent, + K: Hash + Eq, + S: BuildHasher, +{ + /// Returns a mutable reference to the value corresponding to the supplied `key`. + /// + /// ***Panics*** if `key` is not present in the map. + fn index_mut(&mut self, key: &Q) -> &mut V { + self.0.index_mut(key) + } +} + +/// Access `IndexMap` values at indexed positions. +/// +/// It panics if the index is out of bounds. +impl Index for OrderMap { + type Output = V; + + /// Returns a reference to the value at the supplied `index`. + /// + /// ***Panics*** if `index` is out of bounds. + fn index(&self, index: usize) -> &V { + self.0.index(index) + } +} + +/// Access `IndexMap` values at indexed positions. +/// +/// Mutable indexing allows changing / updating indexed values +/// that are already present. +/// +/// You can **not** insert new values with index syntax, use `.insert()`. +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { +/// map.insert(word.to_lowercase(), word.to_string()); +/// } +/// let lorem = &mut map[0]; +/// assert_eq!(lorem, "Lorem"); +/// lorem.retain(char::is_lowercase); +/// assert_eq!(map["lorem"], "orem"); +/// ``` +/// +/// ```should_panic +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// map.insert("foo", 1); +/// map[10] = 1; // panics! +/// ``` +impl IndexMut for OrderMap { + /// Returns a mutable reference to the value at the supplied `index`. + /// + /// ***Panics*** if `index` is out of bounds. + fn index_mut(&mut self, index: usize) -> &mut V { + self.0.index_mut(index) + } +} + +impl FromIterator<(K, V)> for OrderMap +where + K: Hash + Eq, + S: BuildHasher + Default, +{ + /// Create an `OrderMap` from the sequence of key-value pairs in the + /// iterable. + /// + /// `from_iter` uses the same logic as `extend`. See + /// [`extend`](#method.extend) for more details. 
+ fn from_iter>(iterable: I) -> Self { + Self(IndexMap::from_iter(iterable)) + } +} + +// todo: uncomment when the MSRV bumps +// impl From<[(K, V); N]> for OrderMap +// where +// K: Hash + Eq, +// { +// fn from(arr: [(K, V); N]) -> Self { +// Self(IndexMap::from(arr)) +// } +// } + +impl Extend<(K, V)> for OrderMap +where + K: Hash + Eq, + S: BuildHasher, +{ + /// Extend the map with all key-value pairs in the iterable. + /// + /// This is equivalent to calling [`insert`](#method.insert) for each of + /// them in order, which means that for keys that already existed + /// in the map, their value is updated but it keeps the existing order. + /// + /// New keys are inserted in the order they appear in the sequence. If + /// equivalents of a key occur more than once, the last corresponding value + /// prevails. + fn extend>(&mut self, iterable: I) { + self.0.extend(iterable) + } +} + +impl<'a, K, V, S> Extend<(&'a K, &'a V)> for OrderMap +where + K: 'a + Hash + Eq + Copy, + V: 'a + Copy, + S: BuildHasher, +{ + /// Extend the map with all key-value pairs in the iterable. + /// + /// See the first extend method for more details. + fn extend>(&mut self, iterable: I) { + self.0.extend(iterable) + } +} + +impl Default for OrderMap +where + S: Default, +{ + /// Return an empty `OrderMap` + fn default() -> Self { + Self(IndexMap::default()) + } +} + +impl PartialEq> for OrderMap +where + K: Hash + Eq, + V1: PartialEq, + S1: BuildHasher, + S2: BuildHasher, +{ + fn eq(&self, other: &OrderMap) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for OrderMap +where + K: Eq + Hash, + V: Eq, + S: BuildHasher, +{ +} + +impl FromIterator for OrderMap +where + S: BuildHasher + Default, +{ + /// Create an `OrderMap` from the sequence of key-value pairs in the + /// iterable. + /// + /// `from_iter` uses the same logic as `extend`. See + /// [`extend`](#method.extend) for more details. + fn from_iter>(iterable: I) -> Self { + Self(IndexMap::from_iter( + iterable.into_iter().map(|kv| (kv.key, kv.value)), + )) + } +} + +// todo: uncomment below when bumping MSRV +// impl From<[KeyValue; N]> for OrderMap { +// fn from(arr: [KeyValue; N]) -> Self { +// let arr = arr.map(|kv| (kv.key, kv.value)); +// Self(IndexMap::from(arr)) +// } +// } + +impl Extend for OrderMap +where + S: BuildHasher, +{ + /// Extend the map with all key-value pairs in the iterable. + /// + /// This is equivalent to calling [`insert`](#method.insert) for each of + /// them in order, which means that for keys that already existed + /// in the map, their value is updated but it keeps the existing order. + /// + /// New keys are inserted in the order they appear in the sequence. If + /// equivalents of a key occur more than once, the last corresponding value + /// prevails. + fn extend>(&mut self, iterable: I) { + self.0 + .extend(iterable.into_iter().map(|kv| (kv.key, kv.value))) + } +} diff --git a/opentelemetry-api/src/trace/tracer.rs b/opentelemetry-api/src/trace/tracer.rs index 1596d62b37..e36d32c5c5 100644 --- a/opentelemetry-api/src/trace/tracer.rs +++ b/opentelemetry-api/src/trace/tracer.rs @@ -1,8 +1,10 @@ +use crate::trace::OrderMap; use crate::{ trace::{Event, Link, Span, SpanId, SpanKind, Status, TraceContextExt, TraceId, TraceState}, - Context, KeyValue, + Context, Key, KeyValue, Value, }; use std::borrow::Cow; +use std::iter::FromIterator; use std::time::SystemTime; /// The interface for constructing [`Span`]s. 
@@ -259,7 +261,7 @@ pub struct SpanBuilder { pub end_time: Option, /// Span attributes - pub attributes: Option>, + pub attributes: Option>, /// Span events pub events: Option>, @@ -324,8 +326,25 @@ impl SpanBuilder { } } - /// Assign span attributes - pub fn with_attributes(self, attributes: Vec) -> Self { + /// Assign span attributes from an iterable. + /// + /// Check out [`SpanBuilder::with_attributes_map`] to assign span attributes + /// via an [`OrderMap`] instance. + pub fn with_attributes(self, attributes: I) -> Self + where + I: IntoIterator, + { + SpanBuilder { + attributes: Some(OrderMap::from_iter(attributes.into_iter())), + ..self + } + } + + /// Assign span attributes. + /// + /// Check out [`SpanBuilder::with_attributes`] to assign span attributes + /// from an iterable of [`KeyValue`]s. + pub fn with_attributes_map(self, attributes: OrderMap) -> Self { SpanBuilder { attributes: Some(attributes), ..self diff --git a/opentelemetry-aws/Cargo.toml b/opentelemetry-aws/Cargo.toml index fca3bd362a..c3ddaa4904 100644 --- a/opentelemetry-aws/Cargo.toml +++ b/opentelemetry-aws/Cargo.toml @@ -22,8 +22,8 @@ default = ["trace"] trace = ["opentelemetry/trace"] [dependencies] -opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] } -lazy_static = "1.4" +once_cell = "1.12" +opentelemetry = { version = "0.18", path = "../opentelemetry", features = ["trace"] } [dev-dependencies] opentelemetry = { path = "../opentelemetry", features = ["trace", "testing"] } diff --git a/opentelemetry-aws/src/lib.rs b/opentelemetry-aws/src/lib.rs index 2156e5c448..89a6d2d2d1 100644 --- a/opentelemetry-aws/src/lib.rs +++ b/opentelemetry-aws/src/lib.rs @@ -36,6 +36,7 @@ //! A more detailed example can be found in [opentelemetry-rust](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/aws-xray) repo #[cfg(feature = "trace")] pub mod trace { + use once_cell::sync::Lazy; use opentelemetry::{ global::{self, Error}, propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, @@ -58,9 +59,8 @@ pub mod trace { const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); - lazy_static::lazy_static! { - static ref AWS_XRAY_HEADER_FIELD: [String; 1] = [AWS_XRAY_TRACE_HEADER.to_string()]; - } + static AWS_XRAY_HEADER_FIELD: Lazy<[String; 1]> = + Lazy::new(|| [AWS_XRAY_TRACE_HEADER.to_owned()]); /// Extracts and injects `SpanContext`s into `Extractor`s or `Injector`s using AWS X-Ray header format. 
/// diff --git a/opentelemetry-contrib/Cargo.toml b/opentelemetry-contrib/Cargo.toml index b0ea31abf0..69db8beffd 100644 --- a/opentelemetry-contrib/Cargo.toml +++ b/opentelemetry-contrib/Cargo.toml @@ -21,11 +21,19 @@ rustdoc-args = ["--cfg", "docsrs"] default = [] base64_format = ["base64", "binary_propagator"] binary_propagator = [] +jaeger_json_exporter = ["serde_json", "futures", "async-trait"] +rt-tokio = ["tokio", "opentelemetry/rt-tokio"] +rt-tokio-current-thread = ["tokio", "opentelemetry/rt-tokio-current-thread"] +rt-async-std = ["async-std", "opentelemetry/rt-async-std"] [dependencies] -opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] } +async-std = { version = "1.10", optional = true } +async-trait = { version = "0.1", optional = true } base64 = { version = "0.13", optional = true } -lazy_static = "1.4" +futures = { version = "0.3", optional = true } +opentelemetry = { version = "0.18", path = "../opentelemetry", features = ["trace"] } +serde_json = { version = "1", optional = true } +tokio = { version = "1.0", features = ["fs", "io-util"], optional = true } [dev-dependencies] base64 = "0.13" diff --git a/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs b/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs new file mode 100644 index 0000000000..35f0e9631d --- /dev/null +++ b/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs @@ -0,0 +1,299 @@ +//! # Jaeger JSON file Exporter +//! + +use async_trait::async_trait; +use futures::{future::BoxFuture, FutureExt}; +use opentelemetry::sdk::export::trace::{ExportResult, SpanData, SpanExporter}; +use opentelemetry::sdk::trace::{TraceRuntime, Tracer}; +use opentelemetry::trace::TraceError; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::time::SystemTime; + +/// An exporter for Jaeger-compatible JSON files containing trace data +#[derive(Debug)] +pub struct JaegerJsonExporter<R> { + out_path: PathBuf, + file_prefix: String, + service_name: String, + runtime: R, +} + +impl<R: JaegerJsonRuntime> JaegerJsonExporter<R> { + /// Configure a new jaeger-json exporter + /// + /// * `out_path` refers to a directory where span data are written.
If it does not exist, it is created by the exporter + /// * `file_prefix` refers to a prefix prepended to each span file + /// * `service_name` is used to identify the corresponding service in jaeger + /// * `runtime` specifies the used async runtime to write the trace data + pub fn new(out_path: PathBuf, file_prefix: String, service_name: String, runtime: R) -> Self { + Self { + out_path, + file_prefix, + service_name, + runtime, + } + } + + /// Install the exporter using the internally provided runtime + pub fn install_batch(self) -> Tracer { + use opentelemetry::trace::TracerProvider; + + let runtime = self.runtime.clone(); + let provider_builder = + opentelemetry::sdk::trace::TracerProvider::builder().with_batch_exporter(self, runtime); + + let provider = provider_builder.build(); + + let tracer = + provider.versioned_tracer("opentelemetry", Some(env!("CARGO_PKG_VERSION")), None); + let _ = opentelemetry::global::set_tracer_provider(provider); + + tracer + } +} + +impl<R: JaegerJsonRuntime> SpanExporter for JaegerJsonExporter<R> { + fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> { + let mut trace_map = HashMap::new(); + + for span in batch { + let ctx = &span.span_context; + trace_map + .entry(ctx.trace_id()) + .or_insert_with(Vec::new) + .push(span_data_to_jaeger_json(span)); + } + + let data = trace_map + .into_iter() + .map(|(trace_id, spans)| { + serde_json::json!({ + "traceID": trace_id.to_string(), + "spans": spans, + "processes": { + "p1": { + "serviceName": self.service_name, + "tags": [] + } + } + }) + }) + .collect::<Vec<_>>(); + + let json = serde_json::json!({ + "data": data, + }); + + let runtime = self.runtime.clone(); + let out_path = self.out_path.clone(); + let file_prefix = self.file_prefix.clone(); + + async move { + runtime.create_dir(&out_path).await?; + + let file_name = out_path.join(format!( + "{}-{}.json", + file_prefix, + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("This does not fail") + .as_secs() + )); + runtime + .write_to_file( + &file_name, + &serde_json::to_vec(&json).expect("This is a valid json value"), + ) + .await?; + + Ok(()) + } + .boxed() + } +} + +fn span_data_to_jaeger_json( + span: opentelemetry::sdk::export::trace::SpanData, +) -> serde_json::Value { + let events = span + .events + .iter() + .map(|e| { + let mut fields = e + .attributes + .iter() + .map(|a| { + let (tpe, value) = opentelemetry_value_to_json(&a.value); + serde_json::json!({ + "key": a.key.as_str(), + "type": tpe, + "value": value, + }) + }) + .collect::<Vec<_>>(); + fields.push(serde_json::json!({ + "key": "event", + "type": "string", + "value": e.name, + })); + + serde_json::json!({ + "timestamp": e.timestamp.duration_since(SystemTime::UNIX_EPOCH).expect("This does not fail").as_micros() as i64, + "fields": fields, + }) + }) + .collect::<Vec<_>>(); + let tags = span + .attributes + .iter() + .map(|(key, value)| { + let (tpe, value) = opentelemetry_value_to_json(value); + serde_json::json!({ + "key": key.as_str(), + "type": tpe, + "value": value, + }) + }) + .collect::<Vec<_>>(); + let references = if span.links.is_empty() { + None + } else { + Some( + span.links + .iter() + .map(|link| { + let span_context = &link.span_context; + serde_json::json!({ + "refType": "FOLLOWS_FROM", + "traceID": span_context.trace_id().to_string(), + "spanID": span_context.span_id().to_string(), + }) + }) + .collect::<Vec<_>>(), + ) + }; + serde_json::json!({ + "traceID": span.span_context.trace_id().to_string(), + "spanID": span.span_context.span_id().to_string(), + "startTime":
span.start_time.duration_since(SystemTime::UNIX_EPOCH).expect("This does not fail").as_micros() as i64, + "duration": span.end_time.duration_since(span.start_time).expect("This does not fail").as_micros() as i64, + "operationName": span.name, + "tags": tags, + "logs": events, + "flags": span.span_context.trace_flags().to_u8(), + "processID": "p1", + "warnings": None::, + "references": references, + }) +} + +fn opentelemetry_value_to_json(value: &opentelemetry::Value) -> (&str, serde_json::Value) { + match value { + opentelemetry::Value::Bool(b) => ("bool", serde_json::json!(b)), + opentelemetry::Value::I64(i) => ("int64", serde_json::json!(i)), + opentelemetry::Value::F64(f) => ("float64", serde_json::json!(f)), + opentelemetry::Value::String(s) => ("string", serde_json::json!(s.as_str())), + v @ opentelemetry::Value::Array(_) => ("string", serde_json::json!(v.to_string())), + } +} + +/// Jaeger Json Runtime is an extension to [`TraceRuntime`]. +/// +/// [`TraceRuntime`]: opentelemetry::sdk::trace::TraceRuntime +#[async_trait] +pub trait JaegerJsonRuntime: TraceRuntime + std::fmt::Debug { + /// Create a new directory if the given path does not exist yet + async fn create_dir(&self, path: &Path) -> ExportResult; + /// Write the provided content to a new file at the given path + async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult; +} + +#[cfg(feature = "rt-tokio")] +#[async_trait] +impl JaegerJsonRuntime for opentelemetry::runtime::Tokio { + async fn create_dir(&self, path: &Path) -> ExportResult { + if tokio::fs::metadata(path).await.is_err() { + tokio::fs::create_dir_all(path) + .await + .map_err(|e| TraceError::Other(Box::new(e)))? + } + + Ok(()) + } + + async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult { + use tokio::io::AsyncWriteExt; + + let mut file = tokio::fs::File::create(path) + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + file.write_all(content) + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + file.sync_data() + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + + Ok(()) + } +} + +#[cfg(feature = "rt-tokio-current-thread")] +#[async_trait] +impl JaegerJsonRuntime for opentelemetry::runtime::TokioCurrentThread { + async fn create_dir(&self, path: &Path) -> ExportResult { + if tokio::fs::metadata(path).await.is_err() { + tokio::fs::create_dir_all(path) + .await + .map_err(|e| TraceError::Other(Box::new(e)))? 
+ } + + Ok(()) + } + + async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult { + use tokio::io::AsyncWriteExt; + + let mut file = tokio::fs::File::create(path) + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + file.write_all(content) + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + file.sync_data() + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + + Ok(()) + } +} + +#[cfg(feature = "rt-async-std")] +#[async_trait] +impl JaegerJsonRuntime for opentelemetry::runtime::AsyncStd { + async fn create_dir(&self, path: &Path) -> ExportResult { + if async_std::fs::metadata(path).await.is_err() { + async_std::fs::create_dir_all(path) + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + } + Ok(()) + } + + async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult { + use async_std::io::WriteExt; + + let mut file = async_std::fs::File::create(path) + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + file.write_all(content) + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + file.sync_data() + .await + .map_err(|e| TraceError::Other(Box::new(e)))?; + + Ok(()) + } +} diff --git a/opentelemetry-contrib/src/trace/exporter/mod.rs b/opentelemetry-contrib/src/trace/exporter/mod.rs new file mode 100644 index 0000000000..6bef8e5519 --- /dev/null +++ b/opentelemetry-contrib/src/trace/exporter/mod.rs @@ -0,0 +1,13 @@ +//! # Opentelemetry exporter contrib +//! +//! This module provides exporters for third-party vendor formats or experimental exporters that +//! aren't part of OpenTelemetry. +//! +//! Currently, the following exporters are supported: +//! +//! * `jaeger_json`, which allows exporting traces to files using Jaeger's JSON format +//! +//! This module also provides related types for those exporters. + +#[cfg(feature = "jaeger_json_exporter")] +pub mod jaeger_json; diff --git a/opentelemetry-contrib/src/trace/mod.rs b/opentelemetry-contrib/src/trace/mod.rs index 17397494d9..6f9c4fef49 100644 --- a/opentelemetry-contrib/src/trace/mod.rs +++ b/opentelemetry-contrib/src/trace/mod.rs @@ -1,4 +1,5 @@ //! # Opentelemetry trace contrib //! +pub mod exporter; pub mod propagator; diff --git a/opentelemetry-contrib/src/trace/propagator/mod.rs b/opentelemetry-contrib/src/trace/propagator/mod.rs index 5412888536..e4ec13e16f 100644 --- a/opentelemetry-contrib/src/trace/propagator/mod.rs +++ b/opentelemetry-contrib/src/trace/propagator/mod.rs @@ -6,7 +6,6 @@ //! Currently, the following propagators are supported: //! //! * `binary_propagator`, propagating trace context in the binary format. -//! * `XrayPropagator`, propagating via AWS XRay protocol. //! //! This module also provides relative types for those propagators.
pub mod binary; diff --git a/opentelemetry-datadog/Cargo.toml b/opentelemetry-datadog/Cargo.toml index 2d65cfad62..02a5140d6e 100644 --- a/opentelemetry-datadog/Cargo.toml +++ b/opentelemetry-datadog/Cargo.toml @@ -24,17 +24,19 @@ surf-client = ["surf", "opentelemetry-http/surf"] [dependencies] async-trait = "0.1" -indexmap = "1.6" -opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] } +indexmap = "=1.8" +once_cell = "1.12" +opentelemetry = { version = "0.18", path = "../opentelemetry", features = ["trace"] } opentelemetry-http = { version = "0.7", path = "../opentelemetry-http" } opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } rmp = "0.8" +url = "2.2" reqwest = { version = "0.11", default-features = false, optional = true } surf = { version = "2.0", default-features = false, optional = true } thiserror = "1.0" itertools = "0.10" http = "0.2" -lazy_static = "1.4" +futures-core = "0.3" [dev-dependencies] base64 = "0.13" diff --git a/opentelemetry-datadog/src/exporter/mod.rs b/opentelemetry-datadog/src/exporter/mod.rs index f7a29a8392..c9ad748205 100644 --- a/opentelemetry-datadog/src/exporter/mod.rs +++ b/opentelemetry-datadog/src/exporter/mod.rs @@ -5,10 +5,11 @@ pub use model::ApiVersion; pub use model::Error; pub use model::FieldMappingFn; +use std::borrow::Cow; use std::fmt::{Debug, Formatter}; use crate::exporter::model::FieldMapping; -use async_trait::async_trait; +use futures_core::future::BoxFuture; use http::{Method, Request, Uri}; use itertools::Itertools; use opentelemetry::sdk::export::trace; @@ -23,6 +24,7 @@ use opentelemetry_http::{HttpClient, ResponseExt}; use opentelemetry_semantic_conventions as semcov; use std::sync::Arc; use std::time::Duration; +use url::Url; /// Default Datadog collector endpoint const DEFAULT_AGENT_ENDPOINT: &str = "http://127.0.0.1:8126"; @@ -32,7 +34,7 @@ const DATADOG_TRACE_COUNT_HEADER: &str = "X-Datadog-Trace-Count"; /// Datadog span exporter pub struct DatadogExporter { - client: Box, + client: Arc, request_url: Uri, model_config: ModelConfig, version: ApiVersion, @@ -47,7 +49,7 @@ impl DatadogExporter { model_config: ModelConfig, request_url: Uri, version: ApiVersion, - client: Box, + client: Arc, resource_mapping: Option, name_mapping: Option, service_name_mapping: Option, @@ -62,6 +64,27 @@ impl DatadogExporter { service_name_mapping, } } + + fn build_request(&self, batch: Vec) -> Result>, TraceError> { + let traces: Vec> = group_into_traces(batch); + let trace_count = traces.len(); + let data = self.version.encode( + &self.model_config, + traces, + self.service_name_mapping.clone(), + self.name_mapping.clone(), + self.resource_mapping.clone(), + )?; + let req = Request::builder() + .method(Method::POST) + .uri(self.request_url.clone()) + .header(http::header::CONTENT_TYPE, self.version.content_type()) + .header(DATADOG_TRACE_COUNT_HEADER, trace_count) + .body(data) + .map_err::(Into::into)?; + + Ok(req) + } } impl Debug for DatadogExporter { @@ -92,8 +115,7 @@ pub struct DatadogPipelineBuilder { agent_endpoint: String, trace_config: Option, version: ApiVersion, - client: Option>, - + client: Option>, resource_mapping: Option, name_mapping: Option, service_name_mapping: Option, @@ -120,15 +142,15 @@ impl Default for DatadogPipelineBuilder { not(feature = "reqwest-blocking-client"), feature = "surf-client" ))] - client: Some(Box::new(surf::Client::new())), + client: Some(Arc::new(surf::Client::new())), #[cfg(all( not(feature = "surf-client"), 
not(feature = "reqwest-blocking-client"), feature = "reqwest-client" ))] - client: Some(Box::new(reqwest::Client::new())), + client: Some(Arc::new(reqwest::Client::new())), #[cfg(feature = "reqwest-blocking-client")] - client: Some(Box::new(reqwest::blocking::Client::new())), + client: Some(Arc::new(reqwest::blocking::Client::new())), } } } @@ -164,18 +186,16 @@ impl DatadogPipelineBuilder { let service_name = self.service_name.take(); if let Some(service_name) = service_name { let config = if let Some(mut cfg) = self.trace_config.take() { - cfg.resource = cfg.resource.map(|r| { - let without_service_name = r + cfg.resource = Cow::Owned(Resource::new( + cfg.resource .iter() .filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME) - .map(|(k, v)| KeyValue::new(k.clone(), v.clone())) - .collect::>(); - Arc::new(Resource::new(without_service_name)) - }); + .map(|(k, v)| KeyValue::new(k.clone(), v.clone())), + )); cfg } else { Config { - resource: Some(Arc::new(Resource::empty())), + resource: Cow::Owned(Resource::empty()), ..Default::default() } }; @@ -189,7 +209,7 @@ impl DatadogPipelineBuilder { ( Config { // use a empty resource to prevent TracerProvider to assign a service name. - resource: Some(Arc::new(Resource::empty())), + resource: Cow::Owned(Resource::empty()), ..Default::default() }, service_name, @@ -197,6 +217,25 @@ impl DatadogPipelineBuilder { } } + // parse the endpoint and append the path based on versions. + // keep the query and host the same. + fn build_endpoint(agent_endpoint: &str, version: &str) -> Result { + // build agent endpoint based on version + let mut endpoint = agent_endpoint + .parse::() + .map_err::(Into::into)?; + let mut paths = endpoint + .path_segments() + .map(|c| c.filter(|s| !s.is_empty()).collect::>()) + .unwrap_or_default(); + paths.push(version); + + let path_str = paths.join("/"); + endpoint.set_path(path_str.as_str()); + + Ok(endpoint.as_str().parse().map_err::(Into::into)?) + } + fn build_exporter_with_service_name( self, service_name: String, @@ -206,10 +245,10 @@ impl DatadogPipelineBuilder { service_name, ..Default::default() }; - let endpoint = self.agent_endpoint + self.version.path(); + let exporter = DatadogExporter::new( model_config, - endpoint.parse().map_err::(Into::into)?, + Self::build_endpoint(&self.agent_endpoint, self.version.path())?, self.version, client, self.resource_mapping, @@ -266,7 +305,9 @@ impl DatadogPipelineBuilder { self } - /// Assign the Datadog collector endpoint + /// Assign the Datadog collector endpoint. + /// + /// The endpoint of the datadog agent, by default it is `http://127.0.0.1:8126`. 
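The `build_endpoint` helper above rewrites only the path of the configured agent endpoint and keeps the host and query intact. A standalone sketch of the same path-append logic using the same `url` crate (the name `append_version` is invented here; the exporter converts errors into its own `Error` type instead of returning `url::ParseError`):

```rust
use url::Url;

fn append_version(agent_endpoint: &str, version: &str) -> Result<Url, url::ParseError> {
    let mut endpoint = agent_endpoint.parse::<Url>()?;
    // Drop empty segments so a trailing slash in the configured endpoint
    // does not turn into a double slash.
    let mut paths = endpoint
        .path_segments()
        .map(|segments| segments.filter(|s| !s.is_empty()).collect::<Vec<_>>())
        .unwrap_or_default();
    paths.push(version);
    let path = paths.join("/");
    endpoint.set_path(&path);
    Ok(endpoint)
}

fn main() -> Result<(), url::ParseError> {
    // Host and query survive; only the path is rewritten, matching the
    // `test_agent_endpoint_with_version` expectations below.
    let url = append_version("http://localhost:8126/?api_key=123", "v0.5/traces")?;
    assert_eq!(url.as_str(), "http://localhost:8126/v0.5/traces?api_key=123");
    Ok(())
}
```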
pub fn with_agent_endpoint>(mut self, endpoint: T) -> Self { self.agent_endpoint = endpoint.into(); self @@ -275,7 +316,7 @@ impl DatadogPipelineBuilder { /// Choose the http client used by uploader pub fn with_http_client( mut self, - client: Box, + client: Arc, ) -> Self { self.client = Some(client); self @@ -333,28 +374,24 @@ fn group_into_traces(spans: Vec) -> Vec> { .collect() } -#[async_trait] +async fn send_request( + client: Arc, + request: http::Request>, +) -> trace::ExportResult { + let _ = client.send(request).await?.error_for_status()?; + Ok(()) +} + impl trace::SpanExporter for DatadogExporter { /// Export spans to datadog-agent - async fn export(&mut self, batch: Vec) -> trace::ExportResult { - let traces: Vec> = group_into_traces(batch); - let trace_count = traces.len(); - let data = self.version.encode( - &self.model_config, - traces, - self.service_name_mapping.clone(), - self.name_mapping.clone(), - self.resource_mapping.clone(), - )?; - let req = Request::builder() - .method(Method::POST) - .uri(self.request_url.clone()) - .header(http::header::CONTENT_TYPE, self.version.content_type()) - .header(DATADOG_TRACE_COUNT_HEADER, trace_count) - .body(data) - .map_err::(Into::into)?; - let _ = self.client.send(req).await?.error_for_status()?; - Ok(()) + fn export(&mut self, batch: Vec) -> BoxFuture<'static, trace::ExportResult> { + let request = match self.build_request(batch) { + Ok(req) => req, + Err(err) => return Box::pin(std::future::ready(Err(err))), + }; + + let client = self.client.clone(); + Box::pin(send_request(client, request)) } } @@ -379,6 +416,7 @@ fn mapping_debug(f: &Option) -> String { #[cfg(test)] mod tests { use super::*; + use crate::ApiVersion::Version05; use crate::exporter::model::tests::get_span; @@ -396,4 +434,34 @@ mod tests { assert_eq!(traces, expected); } + + #[test] + fn test_agent_endpoint_with_version() { + let with_tail_slash = + DatadogPipelineBuilder::build_endpoint("http://localhost:8126/", Version05.path()); + let without_tail_slash = + DatadogPipelineBuilder::build_endpoint("http://localhost:8126", Version05.path()); + let with_query = DatadogPipelineBuilder::build_endpoint( + "http://localhost:8126?api_key=123", + Version05.path(), + ); + let invalid = DatadogPipelineBuilder::build_endpoint( + "http://localhost:klsajfjksfh", + Version05.path(), + ); + + assert_eq!( + with_tail_slash.unwrap().to_string(), + "http://localhost:8126/v0.5/traces" + ); + assert_eq!( + without_tail_slash.unwrap().to_string(), + "http://localhost:8126/v0.5/traces" + ); + assert_eq!( + with_query.unwrap().to_string(), + "http://localhost:8126/v0.5/traces?api_key=123" + ); + assert!(invalid.is_err()) + } } diff --git a/opentelemetry-datadog/src/exporter/model/mod.rs b/opentelemetry-datadog/src/exporter/model/mod.rs index 3bb2f95066..6e0773c0ff 100644 --- a/opentelemetry-datadog/src/exporter/model/mod.rs +++ b/opentelemetry-datadog/src/exporter/model/mod.rs @@ -1,13 +1,18 @@ use crate::exporter::ModelConfig; +use http::uri; use opentelemetry::sdk::export::{ trace::{self, SpanData}, ExportError, }; use std::fmt::Debug; +use url::ParseError; mod v03; mod v05; +// https://github.com/DataDog/dd-trace-js/blob/c89a35f7d27beb4a60165409376e170eacb194c5/packages/dd-trace/src/constants.js#L4 +static SAMPLING_PRIORITY_KEY: &str = "_sampling_priority_v1"; + /// Custom mapping between opentelemetry spans and datadog spans. /// /// User can provide custom function to change the mapping. 
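The `export` method above switches from `#[async_trait]` to returning a `BoxFuture`, so the request is built synchronously and the returned future no longer borrows the exporter. A minimal sketch of that pattern with `u8` batches and `String` errors standing in for the real types, and `futures-executor` assumed for the demo:

```rust
use futures_core::future::BoxFuture;

type ExportResult = Result<(), String>;

struct Exporter;

impl Exporter {
    fn build_request(&self, batch: Vec<u8>) -> Result<usize, String> {
        // Stand-in for encoding the batch into an HTTP request.
        Ok(batch.len())
    }

    fn export(&mut self, batch: Vec<u8>) -> BoxFuture<'static, ExportResult> {
        // Fail fast, without spawning anything, if the request cannot be built.
        let request = match self.build_request(batch) {
            Ok(req) => req,
            Err(err) => return Box::pin(std::future::ready(Err(err))),
        };
        Box::pin(async move {
            // Stand-in for sending `request` over the wire.
            let _ = request;
            Ok(())
        })
    }
}

fn main() {
    let mut exporter = Exporter;
    let res = futures_executor::block_on(exporter.export(vec![1, 2, 3]));
    assert!(res.is_ok());
}
```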
It currently supports customizing the following @@ -73,8 +78,8 @@ pub enum Error { #[error(transparent)] RequestError(#[from] http::Error), /// The Uri was invalid - #[error(transparent)] - InvalidUri(#[from] http::uri::InvalidUri), + #[error("invalid url {0}")] + InvalidUri(String), /// Other errors #[error("{0}")] Other(String), @@ -92,6 +97,18 @@ impl From for Error { } } +impl From for Error { + fn from(err: ParseError) -> Self { + Self::InvalidUri(err.to_string()) + } +} + +impl From for Error { + fn from(err: uri::InvalidUri) -> Self { + Self::InvalidUri(err.to_string()) + } +} + /// Version of datadog trace ingestion API #[derive(Debug, Copy, Clone)] #[non_exhaustive] @@ -165,12 +182,13 @@ impl ApiVersion { #[cfg(test)] pub(crate) mod tests { use super::*; - use opentelemetry::sdk; use opentelemetry::sdk::InstrumentationLibrary; + use opentelemetry::sdk::{self, Resource}; use opentelemetry::{ trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState}, Key, }; + use std::borrow::Cow; use std::time::{Duration, SystemTime}; fn get_traces() -> Vec> { @@ -207,7 +225,7 @@ pub(crate) mod tests { events, links, status: Status::Ok, - resource: None, + resource: Cow::Owned(Resource::empty()), instrumentation_lib: InstrumentationLibrary::new("component", None, None), } } @@ -227,7 +245,7 @@ pub(crate) mod tests { None, )?); - assert_eq!(encoded.as_str(), "kZGLpHR5cGWjd2Vip3NlcnZpY2Wsc2VydmljZV9uYW1lpG5hbWWpY29tcG9uZW50qHJlc291cmNlqHJlc291cmNlqHRyYWNlX2lkzwAAAAAAAAAHp3NwYW5faWTPAAAAAAAAAGOpcGFyZW50X2lkzwAAAAAAAAABpXN0YXJ00wAAAAAAAAAAqGR1cmF0aW9u0wAAAAA7msoApWVycm9y0gAAAACkbWV0YYGpc3Bhbi50eXBlo3dlYg=="); + assert_eq!(encoded.as_str(), "kZGLpHR5cGWjd2Vip3NlcnZpY2Wsc2VydmljZV9uYW1lpG5hbWWpY29tcG9uZW50qHJlc291cmNlqHJlc291cmNlqHRyYWNlX2lkzwAAAAAAAAAHp3NwYW5faWTPAAAAAAAAAGOpcGFyZW50X2lkzwAAAAAAAAABpXN0YXJ00wAAAAAAAAAAqGR1cmF0aW9u0wAAAAA7msoApWVycm9y0gAAAACkbWV0YYGpc3Bhbi50eXBlo3dlYqdtZXRyaWNzgbVfc2FtcGxpbmdfcHJpb3JpdHlfdjHLAAAAAAAAAAA="); Ok(()) } @@ -248,7 +266,7 @@ pub(crate) mod tests { )?); assert_eq!(encoded.as_str(), - "kpWjd2VirHNlcnZpY2VfbmFtZaljb21wb25lbnSocmVzb3VyY2Wpc3Bhbi50eXBlkZGczgAAAAHOAAAAAs4AAAADzwAAAAAAAAAHzwAAAAAAAABjzwAAAAAAAAAB0wAAAAAAAAAA0wAAAAA7msoA0gAAAACBzgAAAATOAAAAAIDOAAAAAA=="); + "kpajd2VirHNlcnZpY2VfbmFtZaljb21wb25lbnSocmVzb3VyY2Wpc3Bhbi50eXBltV9zYW1wbGluZ19wcmlvcml0eV92MZGRnM4AAAABzgAAAALOAAAAA88AAAAAAAAAB88AAAAAAAAAY88AAAAAAAAAAdMAAAAAAAAAANMAAAAAO5rKANIAAAAAgc4AAAAEzgAAAACBzgAAAAXLAAAAAAAAAADOAAAAAA=="); Ok(()) } diff --git a/opentelemetry-datadog/src/exporter/model/v03.rs b/opentelemetry-datadog/src/exporter/model/v03.rs index 9769dc165f..b9310d75fc 100644 --- a/opentelemetry-datadog/src/exporter/model/v03.rs +++ b/opentelemetry-datadog/src/exporter/model/v03.rs @@ -1,4 +1,4 @@ -use crate::exporter::model::Error; +use crate::exporter::model::{Error, SAMPLING_PRIORITY_KEY}; use crate::exporter::ModelConfig; use opentelemetry::sdk::export::trace; use opentelemetry::sdk::export::trace::SpanData; @@ -41,7 +41,7 @@ where if let Some(Value::String(s)) = span.attributes.get(&Key::new("span.type")) { rmp::encode::write_map_len(&mut encoded, 11)?; rmp::encode::write_str(&mut encoded, "type")?; - rmp::encode::write_str(&mut encoded, s.as_ref())?; + rmp::encode::write_str(&mut encoded, s.as_str())?; } else { rmp::encode::write_map_len(&mut encoded, 10)?; } @@ -95,6 +95,18 @@ where rmp::encode::write_str(&mut encoded, key.as_str())?; rmp::encode::write_str(&mut encoded, value.as_str().as_ref())?; } + + rmp::encode::write_str(&mut encoded, "metrics")?; + 
rmp::encode::write_map_len(&mut encoded, 1)?; + rmp::encode::write_str(&mut encoded, SAMPLING_PRIORITY_KEY)?; + rmp::encode::write_f64( + &mut encoded, + if span.span_context.is_sampled() { + 1.0 + } else { + 0.0 + }, + )?; } } diff --git a/opentelemetry-datadog/src/exporter/model/v05.rs b/opentelemetry-datadog/src/exporter/model/v05.rs index 354ce2f108..94ab1df9b1 100644 --- a/opentelemetry-datadog/src/exporter/model/v05.rs +++ b/opentelemetry-datadog/src/exporter/model/v05.rs @@ -1,4 +1,5 @@ use crate::exporter::intern::StringInterner; +use crate::exporter::model::SAMPLING_PRIORITY_KEY; use crate::exporter::{Error, ModelConfig}; use opentelemetry::sdk::export::trace; use opentelemetry::sdk::export::trace::SpanData; @@ -120,7 +121,7 @@ where .unwrap_or(0); let span_type = match span.attributes.get(&Key::new("span.type")) { - Some(Value::String(s)) => interner.intern(s.as_ref()), + Some(Value::String(s)) => interner.intern(s.as_str()), _ => interner.intern(""), }; @@ -161,7 +162,16 @@ where rmp::encode::write_u32(&mut encoded, interner.intern(key.as_str()))?; rmp::encode::write_u32(&mut encoded, interner.intern(value.as_str().as_ref()))?; } - rmp::encode::write_map_len(&mut encoded, 0)?; + rmp::encode::write_map_len(&mut encoded, 1)?; + rmp::encode::write_u32(&mut encoded, interner.intern(SAMPLING_PRIORITY_KEY))?; + rmp::encode::write_f64( + &mut encoded, + if span.span_context.is_sampled() { + 1.0 + } else { + 0.0 + }, + )?; rmp::encode::write_u32(&mut encoded, span_type)?; } } diff --git a/opentelemetry-datadog/src/lib.rs b/opentelemetry-datadog/src/lib.rs index a6ac1d9826..be87f1c5e9 100644 --- a/opentelemetry-datadog/src/lib.rs +++ b/opentelemetry-datadog/src/lib.rs @@ -137,6 +137,7 @@ mod exporter; mod propagator { + use once_cell::sync::Lazy; use opentelemetry::{ propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, @@ -149,13 +150,13 @@ mod propagator { const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); - lazy_static::lazy_static! 
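Both the v0.3 and v0.5 encoders now emit a `_sampling_priority_v1` metric derived from the span context's sampled flag (1.0 keeps the trace, 0.0 drops it, following the dd-trace constant referenced above). A mirror of the v0.3 map write using the same `rmp` 0.8 calls:

```rust
fn write_sampling_priority(encoded: &mut Vec<u8>, is_sampled: bool) {
    // One-entry msgpack map: "metrics" -> { "_sampling_priority_v1": 0.0 | 1.0 }.
    rmp::encode::write_str(encoded, "metrics").expect("writing to a Vec does not fail");
    rmp::encode::write_map_len(encoded, 1).expect("writing to a Vec does not fail");
    rmp::encode::write_str(encoded, "_sampling_priority_v1")
        .expect("writing to a Vec does not fail");
    rmp::encode::write_f64(encoded, if is_sampled { 1.0 } else { 0.0 })
        .expect("writing to a Vec does not fail");
}

fn main() {
    let mut encoded = Vec::new();
    write_sampling_priority(&mut encoded, true);
    assert!(!encoded.is_empty());
}
```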
{ - static ref DATADOG_HEADER_FIELDS: [String; 3] = [ + static DATADOG_HEADER_FIELDS: Lazy<[String; 3]> = Lazy::new(|| { + [ DATADOG_TRACE_ID_HEADER.to_string(), DATADOG_PARENT_ID_HEADER.to_string(), DATADOG_SAMPLING_PRIORITY_HEADER.to_string(), - ]; - } + ] + }); enum SamplingPriority { UserReject = -1, diff --git a/opentelemetry-dynatrace/Cargo.toml b/opentelemetry-dynatrace/Cargo.toml index dabcf64e30..9511a4f6ac 100644 --- a/opentelemetry-dynatrace/Cargo.toml +++ b/opentelemetry-dynatrace/Cargo.toml @@ -49,7 +49,7 @@ wasm = [ ] [dependencies] -async-std = { version = "1", features = ["unstable"], optional = true } +async-std = { version = "= 1.10.0", features = ["unstable"], optional = true } base64 = { version = "0.13", optional = true } futures = "0.3" futures-util = { version = "0.3", optional = true } @@ -57,7 +57,7 @@ getrandom = { version = "0.2", optional = true } http = "0.2" isahc = { version = "1.4", default-features = false, optional = true } js-sys = { version = "0.3.5", optional = true } -opentelemetry = { version = "0.18.0", path = "../opentelemetry", default-features = false } +opentelemetry = { version = "0.18", path = "../opentelemetry", default-features = false } opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", default-features = false } pin-project = { version = "1.0", optional = true } reqwest = { version = "0.11", default-features = false, optional = true } diff --git a/opentelemetry-dynatrace/src/transform/metrics.rs b/opentelemetry-dynatrace/src/transform/metrics.rs index f0aca6ffdf..3b409b5f10 100644 --- a/opentelemetry-dynatrace/src/transform/metrics.rs +++ b/opentelemetry-dynatrace/src/transform/metrics.rs @@ -801,7 +801,7 @@ mod tests { let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), + KeyValue::new(METRICS_SOURCE, "opentelemetry"), ]); let expect = vec![ @@ -892,7 +892,7 @@ mod tests { let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), + KeyValue::new(METRICS_SOURCE, "opentelemetry"), ]); let expect = vec![MetricLine { @@ -936,7 +936,7 @@ mod tests { let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), + KeyValue::new(METRICS_SOURCE, "opentelemetry"), ]); let expect = vec![MetricLine { @@ -1005,7 +1005,7 @@ mod tests { let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), + KeyValue::new(METRICS_SOURCE, "opentelemetry"), ]); let expect = vec![MetricLine { @@ -1072,7 +1072,7 @@ mod tests { let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), + KeyValue::new(METRICS_SOURCE, "opentelemetry"), ]); let expect = vec![MetricLine { @@ -1142,7 +1142,7 @@ mod tests { let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), + KeyValue::new(METRICS_SOURCE, "opentelemetry"), ]); let expect = vec![MetricLine { diff --git a/opentelemetry-http/Cargo.toml 
b/opentelemetry-http/Cargo.toml index f55b3ef9e0..ed708029dc 100644 --- a/opentelemetry-http/Cargo.toml +++ b/opentelemetry-http/Cargo.toml @@ -13,6 +13,6 @@ async-trait = "0.1" bytes = "1" http = "0.2" isahc = { version = "1.4", default-features = false, optional = true } -opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] } +opentelemetry-api = { version = "0.18", path = "../opentelemetry-api", features = ["trace"] } reqwest = { version = "0.11", default-features = false, features = ["blocking"], optional = true } surf = { version = "2.0", default-features = false, optional = true } diff --git a/opentelemetry-http/README.md b/opentelemetry-http/README.md index a1e44a7734..7dffb8e4d1 100644 --- a/opentelemetry-http/README.md +++ b/opentelemetry-http/README.md @@ -29,7 +29,7 @@ helper types to inject and extract key value pairs into/from HTTP headers. ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.46. The current OpenTelemetry version is not guaranteed to build +version is 1.49. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. The current stable Rust compiler and the three most recent minor versions diff --git a/opentelemetry-http/src/lib.rs b/opentelemetry-http/src/lib.rs index 2aa696c0b1..84f83b74e5 100644 --- a/opentelemetry-http/src/lib.rs +++ b/opentelemetry-http/src/lib.rs @@ -1,9 +1,12 @@ use std::fmt::Debug; +#[doc(no_inline)] +pub use bytes::Bytes; +#[doc(no_inline)] +pub use http::{Request, Response}; + use async_trait::async_trait; -use bytes::Bytes; -use http::{Request, Response}; -use opentelemetry::{ +use opentelemetry_api::{ propagation::{Extractor, Injector}, trace::TraceError, }; diff --git a/opentelemetry-jaeger/Cargo.toml b/opentelemetry-jaeger/Cargo.toml index 8124d5a0df..9179470b95 100644 --- a/opentelemetry-jaeger/Cargo.toml +++ b/opentelemetry-jaeger/Cargo.toml @@ -19,16 +19,18 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -async-std = { version = "1.6", optional = true } +async-std = { version = "= 1.10.0", optional = true } async-trait = "0.1" base64 = { version = "0.13", optional = true } +futures = "0.3" futures-util = { version = "0.3", default-features = false, features = ["std"], optional = true } +futures-executor = "0.3" headers = { version = "0.3.2", optional = true } http = { version = "0.2", optional = true } isahc = { version = "1.4", default-features = false, optional = true } js-sys = { version = "0.3", optional = true } -lazy_static = "1.4" -opentelemetry = { version = "0.18.0", default-features = false, features = ["trace"], path = "../opentelemetry" } +once_cell = "1.12" +opentelemetry = { version = "0.18", default-features = false, features = ["trace"], path = "../opentelemetry" } opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true } opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } pin-project = { version = "1.0", optional = true } @@ -45,6 +47,7 @@ prost = { version = "0.9.0", optional = true } prost-types = { version = "0.9.0", optional = true } [dev-dependencies] +tokio = { version = "1.0", features = ["net", "sync"] } bytes = "1" futures-executor = "0.3" opentelemetry = { default-features = false, features = ["trace", "testing"], path = "../opentelemetry" } @@ -63,6 +66,18 @@ features = [ optional = true [features] +full = [ + "collector_client", + 
"isahc_collector_client", + "reqwest_collector_client", + "reqwest_blocking_collector_client", + "surf_collector_client", + "wasm_collector_client", + "rt-tokio", + "rt-tokio-current-thread", + "rt-async-std", + "integration_test" +] default = [] collector_client = ["http", "opentelemetry-http"] isahc_collector_client = ["isahc", "opentelemetry-http/isahc"] diff --git a/opentelemetry-jaeger/README.md b/opentelemetry-jaeger/README.md index e804f261e4..fe357925df 100644 --- a/opentelemetry-jaeger/README.md +++ b/opentelemetry-jaeger/README.md @@ -147,7 +147,7 @@ fn main() -> Result<(), Box> { ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.46. The current OpenTelemetry version is not guaranteed to build +version is 1.45. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. The current stable Rust compiler and the three most recent minor versions diff --git a/opentelemetry-jaeger/src/exporter/config/agent.rs b/opentelemetry-jaeger/src/exporter/config/agent.rs index efefc17a9b..22e51afb56 100644 --- a/opentelemetry-jaeger/src/exporter/config/agent.rs +++ b/opentelemetry-jaeger/src/exporter/config/agent.rs @@ -232,11 +232,10 @@ impl AgentPipeline { let mut builder = sdk::trace::TracerProvider::builder(); let (config, process) = build_config_and_process( - builder.sdk_provided_resource(), self.trace_config.take(), self.transformation_config.service_name.take(), ); - let exporter = Exporter::new( + let exporter = Exporter::new_sync( process.into(), self.transformation_config.export_instrument_library, self.build_sync_agent_uploader()?, @@ -270,12 +269,16 @@ impl AgentPipeline { // build sdk trace config and jaeger process. // some attributes like service name has attributes like service name let (config, process) = build_config_and_process( - builder.sdk_provided_resource(), self.trace_config.take(), self.transformation_config.service_name.take(), ); let uploader = self.build_async_agent_uploader(runtime.clone())?; - let exporter = Exporter::new(process.into(), export_instrument_library, uploader); + let exporter = Exporter::new_async( + process.into(), + export_instrument_library, + runtime.clone(), + uploader, + ); builder = builder.with_batch_exporter(exporter, runtime); builder = builder.with_config(config); @@ -312,32 +315,29 @@ impl AgentPipeline { where R: JaegerTraceRuntime, { - let builder = sdk::trace::TracerProvider::builder(); let export_instrument_library = self.transformation_config.export_instrument_library; // build sdk trace config and jaeger process. // some attributes like service name has attributes like service name let (_, process) = build_config_and_process( - builder.sdk_provided_resource(), self.trace_config.take(), self.transformation_config.service_name.take(), ); - let uploader = self.build_async_agent_uploader(runtime)?; - Ok(Exporter::new( + let uploader = self.build_async_agent_uploader(runtime.clone())?; + Ok(Exporter::new_async( process.into(), export_instrument_library, + runtime, uploader, )) } /// Build an jaeger exporter targeting a jaeger agent and running on the sync runtime. 
pub fn build_sync_agent_exporter(mut self) -> Result { - let builder = sdk::trace::TracerProvider::builder(); let (_, process) = build_config_and_process( - builder.sdk_provided_resource(), self.trace_config.take(), self.transformation_config.service_name.take(), ); - Ok(Exporter::new( + Ok(Exporter::new_sync( process.into(), self.transformation_config.export_instrument_library, self.build_sync_agent_uploader()?, diff --git a/opentelemetry-jaeger/src/exporter/config/collector/http_client.rs b/opentelemetry-jaeger/src/exporter/config/collector/http_client.rs index 34b3779d5f..eaf8d989e9 100644 --- a/opentelemetry-jaeger/src/exporter/config/collector/http_client.rs +++ b/opentelemetry-jaeger/src/exporter/config/collector/http_client.rs @@ -179,18 +179,14 @@ mod collector_client_tests { use crate::exporter::thrift::jaeger::Batch; use crate::new_collector_pipeline; use opentelemetry::runtime::Tokio; - use opentelemetry::sdk::Resource; use opentelemetry::trace::TraceError; - use opentelemetry::KeyValue; #[test] fn test_bring_your_own_client() -> Result<(), TraceError> { let invalid_uri_builder = new_collector_pipeline() .with_endpoint("localhost:6831") .with_http_client(test_http_client::TestHttpClient); - let sdk_provided_resource = - Resource::new(vec![KeyValue::new("service.name", "unknown_service")]); - let (_, process) = build_config_and_process(sdk_provided_resource, None, None); + let (_, process) = build_config_and_process(None, None); let mut uploader = invalid_uri_builder.build_uploader::()?; let res = futures_executor::block_on(async { uploader diff --git a/opentelemetry-jaeger/src/exporter/config/collector/mod.rs b/opentelemetry-jaeger/src/exporter/config/collector/mod.rs index 205c142695..3b1c517058 100644 --- a/opentelemetry-jaeger/src/exporter/config/collector/mod.rs +++ b/opentelemetry-jaeger/src/exporter/config/collector/mod.rs @@ -407,12 +407,16 @@ impl CollectorPipeline { // some attributes like service name has attributes like service name let export_instrument_library = self.transformation_config.export_instrument_library; let (config, process) = build_config_and_process( - builder.sdk_provided_resource(), self.trace_config.take(), self.transformation_config.service_name.take(), ); let uploader = self.build_uploader::()?; - let exporter = Exporter::new(process.into(), export_instrument_library, uploader); + let exporter = Exporter::new_async( + process.into(), + export_instrument_library, + runtime.clone(), + uploader, + ); builder = builder.with_batch_exporter(exporter, runtime); builder = builder.with_config(config); diff --git a/opentelemetry-jaeger/src/exporter/config/mod.rs b/opentelemetry-jaeger/src/exporter/config/mod.rs index a363c0270a..7b314c2ec8 100644 --- a/opentelemetry-jaeger/src/exporter/config/mod.rs +++ b/opentelemetry-jaeger/src/exporter/config/mod.rs @@ -10,12 +10,9 @@ //! [jaeger deployment guide]: https://www.jaegertracing.io/docs/1.31/deployment use crate::Process; -use opentelemetry::sdk::trace::Config; -use opentelemetry::sdk::Resource; use opentelemetry::trace::{TraceError, TracerProvider}; use opentelemetry::{global, sdk, KeyValue}; use opentelemetry_semantic_conventions as semcov; -use std::sync::Arc; /// Config a exporter that sends the spans to a [jaeger agent](https://www.jaegertracing.io/docs/1.31/deployment/#agent). pub mod agent; @@ -54,35 +51,25 @@ trait HasRequiredConfig { // There are multiple ways to set the service name. A `service.name` tag will be always added // to the process tags. 
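The rewritten `build_config_and_process` below no longer takes an SDK-provided resource, so the service name resolves through a simple precedence chain. A toy version of just that resolution (`resolve_service_name` is invented for illustration):

```rust
// Precedence: explicit builder value, then a `service.name` attribute on
// the configured resource, then the fixed fallback.
fn resolve_service_name(
    builder_value: Option<String>,
    resource_value: Option<String>,
) -> String {
    builder_value
        .or(resource_value)
        .unwrap_or_else(|| "unknown_service".to_string())
}

fn main() {
    // Matches the updated tests: the builder-provided name wins.
    assert_eq!(
        resolve_service_name(Some("halloween_service".into()), Some("ignored".into())),
        "halloween_service"
    );
    assert_eq!(resolve_service_name(None, None), "unknown_service");
}
```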
fn build_config_and_process( - sdk_resource: sdk::Resource, - mut config: Option, + config: Option, service_name_opt: Option, ) -> (sdk::trace::Config, Process) { - let (config, resource) = if let Some(mut config) = config.take() { - let resource = if let Some(resource) = config.resource.replace(Arc::new(Resource::empty())) - { - sdk_resource.merge(resource) - } else { - sdk_resource - }; - - (config, resource) - } else { - (Config::default(), sdk_resource) - }; + let config = config.unwrap_or_default(); let service_name = service_name_opt.unwrap_or_else(|| { - resource + config + .resource .get(semcov::resource::SERVICE_NAME) .map(|v| v.to_string()) .unwrap_or_else(|| "unknown_service".to_string()) }); // merge the tags and resource. Resources take priority. - let mut tags = resource - .into_iter() - .filter(|(key, _)| *key != semcov::resource::SERVICE_NAME) - .map(|(key, value)| KeyValue::new(key, value)) + let mut tags = config + .resource + .iter() + .filter(|(key, _)| **key != semcov::resource::SERVICE_NAME) + .map(|(key, value)| KeyValue::new(key.clone(), value.clone())) .collect::>(); tags.push(KeyValue::new( @@ -101,55 +88,24 @@ mod tests { use opentelemetry::sdk::Resource; use opentelemetry::KeyValue; use std::env; - use std::sync::Arc; #[test] fn test_set_service_name() { let service_name = "halloween_service".to_string(); // set via builder's service name, it has highest priority - let (_, process) = - build_config_and_process(Resource::empty(), None, Some(service_name.clone())); + let (_, process) = build_config_and_process(None, Some(service_name.clone())); assert_eq!(process.service_name, service_name); // make sure the tags in resource are moved to process let trace_config = Config::default() .with_resource(Resource::new(vec![KeyValue::new("test-key", "test-value")])); - let (config, process) = - build_config_and_process(Resource::empty(), Some(trace_config), Some(service_name)); - assert_eq!(config.resource, Some(Arc::new(Resource::empty()))); + let (_, process) = build_config_and_process(Some(trace_config), Some(service_name)); assert_eq!(process.tags.len(), 2); - - // sdk provided resource can override service name if users didn't provided service name to builder - let (_, process) = build_config_and_process( - Resource::new(vec![KeyValue::new("service.name", "halloween_service")]), - None, - None, - ); - assert_eq!(process.service_name, "halloween_service"); - - // users can also provided service.name from config's resource, in this case, it will override the - // sdk provided service name - let trace_config = Config::default().with_resource(Resource::new(vec![KeyValue::new( - "service.name", - "override_service", - )])); - let (_, process) = build_config_and_process( - Resource::new(vec![KeyValue::new("service.name", "halloween_service")]), - Some(trace_config), - None, - ); - - assert_eq!(process.service_name, "override_service"); - assert_eq!(process.tags.len(), 1); - assert_eq!( - process.tags[0], - KeyValue::new("service.name", "override_service") - ); } - #[test] - fn test_read_from_env() { + #[tokio::test] + async fn test_read_from_env() { // OTEL_SERVICE_NAME env var also works env::set_var("OTEL_SERVICE_NAME", "test service"); let builder = new_agent_pipeline(); diff --git a/opentelemetry-jaeger/src/exporter/mod.rs b/opentelemetry-jaeger/src/exporter/mod.rs index 0d64a739ea..2a9c16f56a 100644 --- a/opentelemetry-jaeger/src/exporter/mod.rs +++ b/opentelemetry-jaeger/src/exporter/mod.rs @@ -18,7 +18,9 @@ use std::convert::TryFrom; use 
self::runtime::JaegerTraceRuntime; use self::thrift::jaeger; -use async_trait::async_trait; +use futures::channel::{mpsc, oneshot}; +use futures::future::BoxFuture; +use futures::StreamExt; use std::convert::TryInto; #[cfg(feature = "isahc_collector_client")] @@ -42,25 +44,108 @@ const INSTRUMENTATION_LIBRARY_NAME: &str = "otel.library.name"; /// Instrument Library version MUST be reported in Jaeger Span tags with the following key const INSTRUMENTATION_LIBRARY_VERSION: &str = "otel.library.version"; +#[derive(Debug)] +enum ExportMessage { + Export { + batch: Vec, + tx: oneshot::Sender, + }, + Shutdown, +} + /// Jaeger span exporter #[derive(Debug)] pub struct Exporter { + tx: mpsc::Sender, + + // In the switch to concurrent exports, the non-test code which used this + // value was moved into the ExporterTask implementation. However, there's + // still a test that relies on this value being here, thus the + // allow(dead_code). + #[allow(dead_code)] process: jaeger::Process, - /// Whether or not to export instrumentation information. - export_instrumentation_lib: bool, - uploader: Box, } impl Exporter { - fn new( + fn new_async( + process: jaeger::Process, + export_instrumentation_lib: bool, + runtime: R, + uploader: Box, + ) -> Exporter + where + R: JaegerTraceRuntime, + { + let (tx, rx) = mpsc::channel(64); + + let exporter_task = ExporterTask { + rx, + export_instrumentation_lib, + uploader, + process: process.clone(), + }; + + runtime.spawn(Box::pin(exporter_task.run())); + + Exporter { tx, process } + } + + fn new_sync( process: jaeger::Process, export_instrumentation_lib: bool, uploader: Box, ) -> Exporter { - Exporter { - process, + let (tx, rx) = mpsc::channel(64); + + let exporter_task = ExporterTask { + rx, export_instrumentation_lib, uploader, + process: process.clone(), + }; + + std::thread::spawn(move || { + futures_executor::block_on(exporter_task.run()); + }); + + Exporter { tx, process } + } +} + +struct ExporterTask { + rx: mpsc::Receiver, + process: jaeger::Process, + /// Whether or not to export instrumentation information. + export_instrumentation_lib: bool, + uploader: Box, +} + +impl ExporterTask { + async fn run(mut self) { + while let Some(message) = self.rx.next().await { + match message { + ExportMessage::Export { batch, tx } => { + let mut jaeger_spans: Vec = Vec::with_capacity(batch.len()); + let process = self.process.clone(); + + for span in batch.into_iter() { + jaeger_spans.push(convert_otel_span_into_jaeger_span( + span, + self.export_instrumentation_lib, + )); + } + + let res = self + .uploader + .upload(jaeger::Batch::new(process, jaeger_spans)) + .await; + + // Errors here might be completely expected if the receiver didn't + // care about the result. 
+ let _ = tx.send(res); + } + ExportMessage::Shutdown => break, + } } } } @@ -74,23 +159,20 @@ pub struct Process { pub tags: Vec, } -#[async_trait] impl trace::SpanExporter for Exporter { /// Export spans to Jaeger - async fn export(&mut self, batch: Vec) -> trace::ExportResult { - let mut jaeger_spans: Vec = Vec::with_capacity(batch.len()); - let process = self.process.clone(); - - for span in batch.into_iter() { - jaeger_spans.push(convert_otel_span_into_jaeger_span( - span, - self.export_instrumentation_lib, - )); + fn export(&mut self, batch: Vec) -> BoxFuture<'static, trace::ExportResult> { + let (tx, rx) = oneshot::channel(); + + if let Err(err) = self.tx.try_send(ExportMessage::Export { batch, tx }) { + return Box::pin(futures::future::ready(Err(Into::into(err)))); } - self.uploader - .upload(jaeger::Batch::new(process, jaeger_spans)) - .await + Box::pin(async move { rx.await? }) + } + + fn shutdown(&mut self) { + let _ = self.tx.try_send(ExportMessage::Shutdown); } } diff --git a/opentelemetry-jaeger/src/lib.rs b/opentelemetry-jaeger/src/lib.rs index c921e0672f..24d6921d4c 100644 --- a/opentelemetry-jaeger/src/lib.rs +++ b/opentelemetry-jaeger/src/lib.rs @@ -25,7 +25,8 @@ //! use opentelemetry::trace::Tracer; //! use opentelemetry::global; //! -//! fn main() -> Result<(), opentelemetry::trace::TraceError> { +//! #[tokio::main] +//! async fn main() -> Result<(), opentelemetry::trace::TraceError> { //! global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); //! let tracer = opentelemetry_jaeger::new_agent_pipeline().install_simple()?; //! @@ -323,6 +324,7 @@ mod exporter; pub mod testing; mod propagator { + use once_cell::sync::Lazy; use opentelemetry::{ global::{self, Error}, propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, @@ -340,9 +342,7 @@ mod propagator { const TRACE_FLAG_DEBUG: TraceFlags = TraceFlags::new(0x04); - lazy_static::lazy_static! { - static ref JAEGER_HEADER_FIELD: [String; 1] = [JAEGER_HEADER.to_string()]; - } + static JAEGER_HEADER_FIELD: Lazy<[String; 1]> = Lazy::new(|| [JAEGER_HEADER.to_owned()]); /// The Jaeger propagator propagates span contexts in [Jaeger propagation format]. 
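The Jaeger exporter above now decouples `export` from the upload: batches travel over a bounded `mpsc` channel to a background task that owns the uploader, and results come back over a `oneshot` channel. A self-contained sketch of that request/reply pattern, with `u32` standing in for span data and `String` for the error type:

```rust
use futures::channel::{mpsc, oneshot};
use futures::StreamExt;

type ExportResult = Result<usize, String>;

enum ExportMessage {
    Export {
        batch: Vec<u32>,
        tx: oneshot::Sender<ExportResult>,
    },
    Shutdown,
}

async fn exporter_task(mut rx: mpsc::Receiver<ExportMessage>) {
    while let Some(message) = rx.next().await {
        match message {
            ExportMessage::Export { batch, tx } => {
                // Stand-in for the real upload; a send error just means the
                // caller stopped waiting for the result.
                let _ = tx.send(Ok(batch.len()));
            }
            ExportMessage::Shutdown => break,
        }
    }
}

fn main() {
    let (mut tx, rx) = mpsc::channel(64);
    let (reply_tx, reply_rx) = oneshot::channel();

    // Capacity 64 is ample here, so try_send cannot fail.
    let _ = tx.try_send(ExportMessage::Export {
        batch: vec![1, 2, 3],
        tx: reply_tx,
    });
    let _ = tx.try_send(ExportMessage::Shutdown);

    futures_executor::block_on(async {
        let (_, res) = futures::join!(exporter_task(rx), async {
            reply_rx.await.expect("task replies before shutdown")
        });
        assert_eq!(res, Ok(3));
    });
}
```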
/// @@ -512,10 +512,9 @@ mod propagator { } fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { - cx.with_remote_span_context( - self.extract_span_context(extractor) - .unwrap_or_else(|_| SpanContext::empty_context()), - ) + self.extract_span_context(extractor) + .map(|sc| cx.with_remote_span_context(sc)) + .unwrap_or_else(|_| cx.clone()) } fn fields(&self) -> FieldIter<'_> { diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml index 88ea1952ff..fc2873e1f9 100644 --- a/opentelemetry-otlp/Cargo.toml +++ b/opentelemetry-otlp/Cargo.toml @@ -35,12 +35,12 @@ futures-util = { version = "0.3", default-features = false, features = ["std"] } opentelemetry-proto = { version = "0.1", path = "../opentelemetry-proto", default-features = false } grpcio = { version = "0.9", optional = true } -opentelemetry = { version = "0.18.0", default-features = false, features = ["trace"], path = "../opentelemetry" } +opentelemetry = { version = "0.18", default-features = false, features = ["trace"], path = "../opentelemetry" } opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true } protobuf = { version = "2.18", optional = true } -prost = { version = "0.9", optional = true } -tonic = { version = "0.6.2", optional = true } +prost = { version = "0.10.1", optional = true } +tonic = { version = "0.7.1", optional = true } tokio = { version = "1.0", features = ["sync", "rt"], optional = true } reqwest = { version = "0.11", optional = true, default-features = false } diff --git a/opentelemetry-otlp/src/exporter/http.rs b/opentelemetry-otlp/src/exporter/http.rs index d04b2a64e9..7cb20c47d3 100644 --- a/opentelemetry-otlp/src/exporter/http.rs +++ b/opentelemetry-otlp/src/exporter/http.rs @@ -1,6 +1,7 @@ use crate::{ExportConfig, Protocol}; use opentelemetry_http::HttpClient; use std::collections::HashMap; +use std::sync::Arc; /// Configuration of the http transport #[cfg(feature = "http-proto")] @@ -15,7 +16,7 @@ use std::collections::HashMap; )] pub struct HttpConfig { /// Select the HTTP client - pub client: Option>, + pub client: Option>, /// Additional headers to send to the collector. 
pub headers: Option>, @@ -30,19 +31,19 @@ impl Default for HttpConfig { fn default() -> Self { HttpConfig { #[cfg(feature = "reqwest-blocking-client")] - client: Some(Box::new(reqwest::blocking::Client::new())), + client: Some(Arc::new(reqwest::blocking::Client::new())), #[cfg(all( not(feature = "reqwest-blocking-client"), not(feature = "surf-client"), feature = "reqwest-client" ))] - client: Some(Box::new(reqwest::Client::new())), + client: Some(Arc::new(reqwest::Client::new())), #[cfg(all( not(feature = "reqwest-client"), not(feature = "reqwest-blocking-client"), feature = "surf-client" ))] - client: Some(Box::new(surf::Client::new())), + client: Some(Arc::new(surf::Client::new())), #[cfg(all( not(feature = "reqwest-client"), not(feature = "surf-client"), @@ -78,7 +79,7 @@ impl Default for HttpExporterBuilder { impl HttpExporterBuilder { /// Assign client implementation pub fn with_http_client(mut self, client: T) -> Self { - self.http_config.client = Some(Box::new(client)); + self.http_config.client = Some(Arc::new(client)); self } diff --git a/opentelemetry-otlp/src/exporter/mod.rs b/opentelemetry-otlp/src/exporter/mod.rs index 1bb85c7ddc..6f54f3166e 100644 --- a/opentelemetry-otlp/src/exporter/mod.rs +++ b/opentelemetry-otlp/src/exporter/mod.rs @@ -12,20 +12,17 @@ use crate::Protocol; use std::str::FromStr; use std::time::Duration; -/// Target to which the exporter is going to send spans or metrics, defaults to https://localhost:4317. +/// Target to which the exporter is going to send signals, defaults to https://localhost:4317. +/// Learn about the relationship between this constant and metrics/spans/logs at +/// pub const OTEL_EXPORTER_OTLP_ENDPOINT: &str = "OTEL_EXPORTER_OTLP_ENDPOINT"; -/// Default target to which the exporter is going to send spans or metrics. +/// Default target to which the exporter is going to send signals. pub const OTEL_EXPORTER_OTLP_ENDPOINT_DEFAULT: &str = "https://localhost:4317"; -/// Max waiting time for the backend to process each spans or metrics batch, defaults to 10 seconds. +/// Max waiting time for the backend to process each signal batch, defaults to 10 seconds. pub const OTEL_EXPORTER_OTLP_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_TIMEOUT"; -/// Default max waiting time for the backend to process each spans or metrics batch. +/// Default max waiting time for the backend to process each signal batch. pub const OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT: u64 = 10; -/// Target to which the exporter is going to send spans, defaults to https://localhost:4317. -pub const OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: &str = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"; -/// Max waiting time for the backend to process each spans batch, defaults to 10s. -pub const OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT"; - #[cfg(feature = "grpc-sys")] pub(crate) mod grpcio; #[cfg(feature = "http-proto")] @@ -36,7 +33,7 @@ pub(crate) mod tonic; /// Configuration for the OTLP exporter. #[derive(Debug)] pub struct ExportConfig { - /// The address of the OTLP collector. If not set, the default address is used. + /// The base address of the OTLP collector. If not set, the default address is used. pub endpoint: String, /// The protocol to use when communicating with the collector. 
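`with_env` (next hunk) simplifies the fallback logic for these generic variables: the traces-specific variables move out to the span and metric modules, and an unparsable timeout is treated the same as an unset one. A minimal sketch of that resolution:

```rust
use std::str::FromStr;
use std::time::Duration;

const ENDPOINT_DEFAULT: &str = "https://localhost:4317";
const TIMEOUT_DEFAULT_SECS: u64 = 10;

fn endpoint_from_env() -> String {
    std::env::var("OTEL_EXPORTER_OTLP_ENDPOINT")
        .unwrap_or_else(|_| ENDPOINT_DEFAULT.to_string())
}

fn timeout_from_env() -> Duration {
    let secs = std::env::var("OTEL_EXPORTER_OTLP_TIMEOUT")
        .ok()
        .and_then(|val| u64::from_str(&val).ok())
        .unwrap_or(TIMEOUT_DEFAULT_SECS);
    Duration::from_secs(secs)
}

fn main() {
    std::env::remove_var("OTEL_EXPORTER_OTLP_ENDPOINT");
    std::env::set_var("OTEL_EXPORTER_OTLP_TIMEOUT", "bad_timeout");
    assert_eq!(endpoint_from_env(), ENDPOINT_DEFAULT);
    // "bad_timeout" does not parse, so the 10 second default applies,
    // matching the updated test below.
    assert_eq!(timeout_from_env(), Duration::from_secs(10));
}
```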
@@ -129,18 +126,15 @@ impl WithExportConfig for B { } fn with_env(mut self) -> Self { - let endpoint = match std::env::var(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT) { + let endpoint = match std::env::var(OTEL_EXPORTER_OTLP_ENDPOINT) { Ok(val) => val, - Err(_) => std::env::var(OTEL_EXPORTER_OTLP_ENDPOINT) - .unwrap_or_else(|_| OTEL_EXPORTER_OTLP_ENDPOINT_DEFAULT.to_string()), + Err(_) => OTEL_EXPORTER_OTLP_ENDPOINT_DEFAULT.to_string(), }; self.export_config().endpoint = endpoint; - let timeout = match std::env::var(OTEL_EXPORTER_OTLP_TRACES_TIMEOUT) { + let timeout = match std::env::var(OTEL_EXPORTER_OTLP_TIMEOUT) { Ok(val) => u64::from_str(&val).unwrap_or(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT), - Err(_) => std::env::var(OTEL_EXPORTER_OTLP_TIMEOUT) - .map(|val| u64::from_str(&val).unwrap_or(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT)) - .unwrap_or(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT), + Err(_) => OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT, }; self.export_config().timeout = Duration::from_secs(timeout); self @@ -159,17 +153,20 @@ impl WithExportConfig for B { mod tests { use crate::exporter::{ WithExportConfig, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TIMEOUT, - OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, - OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, + OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT, }; use crate::new_exporter; #[test] - fn test_pipeline_builder_from_env() { - std::env::set_var(OTEL_EXPORTER_OTLP_ENDPOINT, "https://otlp_endpoint:4317"); + fn test_pipeline_builder_from_env_default_vars() { + let expected_endpoint = "https://otlp_endpoint:4317"; + std::env::set_var(OTEL_EXPORTER_OTLP_ENDPOINT, expected_endpoint); std::env::set_var(OTEL_EXPORTER_OTLP_TIMEOUT, "bad_timeout"); let mut exporter_builder = new_exporter().tonic().with_env(); + assert_eq!(exporter_builder.exporter_config.endpoint, expected_endpoint); + + exporter_builder = new_exporter().tonic().with_env(); assert_eq!( exporter_builder.exporter_config.timeout, std::time::Duration::from_secs(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT) @@ -187,31 +184,5 @@ mod tests { std::env::remove_var(OTEL_EXPORTER_OTLP_TIMEOUT); assert!(std::env::var(OTEL_EXPORTER_OTLP_ENDPOINT).is_err()); assert!(std::env::var(OTEL_EXPORTER_OTLP_TIMEOUT).is_err()); - - // test from traces env var - std::env::set_var( - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, - "https://otlp_traces_endpoint:4317", - ); - std::env::set_var(OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, "bad_timeout"); - - let mut exporter_builder = new_exporter().tonic().with_env(); - assert_eq!( - exporter_builder.exporter_config.timeout, - std::time::Duration::from_secs(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT) - ); - - std::env::set_var(OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, "60"); - - exporter_builder = new_exporter().tonic().with_env(); - assert_eq!( - exporter_builder.exporter_config.timeout, - std::time::Duration::from_secs(60) - ); - - std::env::remove_var(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT); - std::env::remove_var(OTEL_EXPORTER_OTLP_TRACES_TIMEOUT); - assert!(std::env::var(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT).is_err()); - assert!(std::env::var(OTEL_EXPORTER_OTLP_TRACES_TIMEOUT).is_err()); } } diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index adf6f70aae..8588de6ffd 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -189,16 +189,21 @@ mod transform; pub use crate::exporter::ExportConfig; #[cfg(feature = "trace")] -pub use crate::span::{OtlpTracePipeline, SpanExporter, SpanExporterBuilder}; +pub use crate::span::{ + OtlpTracePipeline, SpanExporter, 
SpanExporterBuilder, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, + OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, +}; #[cfg(feature = "metrics")] -pub use crate::metric::{MetricsExporter, OtlpMetricPipeline}; +pub use crate::metric::{ + MetricsExporter, OtlpMetricPipeline, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, + OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, +}; pub use crate::exporter::{ HasExportConfig, WithExportConfig, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_ENDPOINT_DEFAULT, OTEL_EXPORTER_OTLP_TIMEOUT, - OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, - OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, + OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT, }; use opentelemetry::sdk::export::ExportError; diff --git a/opentelemetry-otlp/src/metric.rs b/opentelemetry-otlp/src/metric.rs index 95bd07667b..a3efd7065b 100644 --- a/opentelemetry-otlp/src/metric.rs +++ b/opentelemetry-otlp/src/metric.rs @@ -25,15 +25,25 @@ use opentelemetry_proto::tonic::collector::metrics::v1::{ metrics_service_client::MetricsServiceClient, ExportMetricsServiceRequest, }; use std::fmt::{Debug, Formatter}; +#[cfg(feature = "grpc-tonic")] +use std::str::FromStr; use std::sync::Arc; use std::sync::Mutex; use std::time; +use std::time::Duration; use tonic::metadata::KeyAndValueRef; #[cfg(feature = "grpc-tonic")] use tonic::transport::Channel; #[cfg(feature = "grpc-tonic")] use tonic::Request; +/// Target to which the exporter is going to send metrics, defaults to https://localhost:4317/v1/metrics. +/// Learn about the relationship between this constant and default/spans/logs at +/// +pub const OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: &str = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"; +/// Max waiting time for the backend to process each metrics batch, defaults to 10s. +pub const OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT"; + impl OtlpPipeline { /// Create a OTLP metrics pipeline. 
pub fn metrics( @@ -263,8 +273,20 @@ impl MetricsExporter { mut tonic_config: TonicConfig, export_selector: T, ) -> Result { - let endpoint = - Channel::from_shared(config.endpoint).map_err::(Into::into)?; + let endpoint = match std::env::var(OTEL_EXPORTER_OTLP_METRICS_ENDPOINT) { + Ok(val) => val, + Err(_) => format!("{}{}", config.endpoint, "/v1/metrics"), + }; + + let _timeout = match std::env::var(OTEL_EXPORTER_OTLP_METRICS_TIMEOUT) { + Ok(val) => match u64::from_str(&val) { + Ok(seconds) => Duration::from_secs(seconds), + Err(_) => config.timeout, + }, + Err(_) => config.timeout, + }; + + let endpoint = Channel::from_shared(endpoint).map_err::(Into::into)?; #[cfg(all(feature = "tls"))] let channel = match tonic_config.tls_config { @@ -273,7 +295,7 @@ impl MetricsExporter { .map_err::(Into::into)?, None => endpoint, } - .timeout(config.timeout) + .timeout(_timeout) .connect_lazy(); #[cfg(not(feature = "tls"))] diff --git a/opentelemetry-otlp/src/span.rs b/opentelemetry-otlp/src/span.rs index c1cd803de5..fb8acb2f33 100644 --- a/opentelemetry-otlp/src/span.rs +++ b/opentelemetry-otlp/src/span.rs @@ -5,6 +5,8 @@ use std::fmt::{self, Debug}; use std::time::Duration; +#[cfg(feature = "grpc-tonic")] +use std::str::FromStr; #[cfg(feature = "grpc-tonic")] use { crate::exporter::tonic::{TonicConfig, TonicExporterBuilder}, @@ -30,7 +32,6 @@ use { trace_service::ExportTraceServiceRequest as GrpcRequest, trace_service_grpc::TraceServiceClient as GrpcioTraceServiceClient, }, - std::sync::Arc, }; #[cfg(feature = "http-proto")] @@ -47,7 +48,7 @@ use { }; #[cfg(any(feature = "grpc-sys", feature = "http-proto"))] -use std::collections::HashMap; +use {std::collections::HashMap, std::sync::Arc}; use crate::exporter::ExportConfig; use crate::OtlpPipeline; @@ -64,6 +65,13 @@ use opentelemetry::{ use async_trait::async_trait; +/// Target to which the exporter is going to send spans, defaults to https://localhost:4317/v1/traces. +/// Learn about the relationship between this constant and default/metrics/logs at +/// +pub const OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: &str = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"; +/// Max waiting time for the backend to process each spans batch, defaults to 10s. +pub const OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT"; + impl OtlpPipeline { /// Create a OTLP tracing pipeline. 
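For metrics, the endpoint resolution above prefers an explicit `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` and otherwise appends the signal path to the generic endpoint. A toy version of that rule:

```rust
// Explicit signal endpoint wins; otherwise append the signal path.
fn metrics_endpoint(generic_endpoint: &str) -> String {
    std::env::var("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT")
        .unwrap_or_else(|_| format!("{}/v1/metrics", generic_endpoint))
}

fn main() {
    std::env::remove_var("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT");
    assert_eq!(
        metrics_endpoint("https://localhost:4317"),
        "https://localhost:4317/v1/metrics"
    );
}
```

The traces hunk in span.rs below applies the same pattern with `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` and `/v1/traces`.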
pub fn tracing(self) -> OtlpTracePipeline { @@ -269,7 +277,7 @@ pub enum SpanExporter { /// The Collector URL collector_endpoint: Uri, /// The HTTP trace exporter - trace_exporter: Option>, + trace_exporter: Option>, }, } @@ -314,18 +322,31 @@ impl SpanExporter { config: ExportConfig, tonic_config: TonicConfig, ) -> Result { - let endpoint = TonicChannel::from_shared(config.endpoint.clone())?; + let endpoint_str = match std::env::var(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT) { + Ok(val) => val, + Err(_) => format!("{}{}", config.endpoint, "/v1/traces"), + }; + + let endpoint = TonicChannel::from_shared(endpoint_str)?; + + let _timeout = match std::env::var(OTEL_EXPORTER_OTLP_TRACES_TIMEOUT) { + Ok(val) => match u64::from_str(&val) { + Ok(seconds) => Duration::from_secs(seconds), + Err(_) => config.timeout, + }, + Err(_) => config.timeout, + }; #[cfg(feature = "tls")] let channel = match tonic_config.tls_config.as_ref() { Some(tls_config) => endpoint.tls_config(tls_config.clone())?, None => endpoint, } - .timeout(config.timeout) + .timeout(_timeout) .connect_lazy(); #[cfg(not(feature = "tls"))] - let channel = endpoint.timeout(config.timeout).connect_lazy(); + let channel = endpoint.timeout(_timeout).connect_lazy(); SpanExporter::from_tonic_channel(config, tonic_config, channel) } @@ -397,9 +418,74 @@ impl SpanExporter { } } +#[cfg(feature = "grpc-sys")] +async fn grpcio_send_request( + trace_exporter: GrpcioTraceServiceClient, + request: GrpcRequest, + call_options: CallOption, +) -> ExportResult { + let receiver = trace_exporter + .export_async_opt(&request, call_options) + .map_err::(Into::into)?; + receiver.await.map_err::(Into::into)?; + Ok(()) +} + +#[cfg(feature = "tonic")] +async fn tonic_send_request( + trace_exporter: TonicTraceServiceClient, + request: Request, +) -> ExportResult { + trace_exporter + .to_owned() + .export(request) + .await + .map_err::(Into::into)?; + + Ok(()) +} + +#[cfg(feature = "http-proto")] +async fn http_send_request( + batch: Vec, + client: std::sync::Arc, + headers: Option>, + collector_endpoint: Uri, +) -> ExportResult { + let req = ProstRequest { + resource_spans: batch.into_iter().map(Into::into).collect(), + }; + + let mut buf = vec![]; + req.encode(&mut buf) + .map_err::(Into::into)?; + + let mut request = http::Request::builder() + .method(Method::POST) + .uri(collector_endpoint) + .header(CONTENT_TYPE, "application/x-protobuf") + .body(buf) + .map_err::(Into::into)?; + + if let Some(headers) = headers { + for (k, val) in headers { + let value = + HeaderValue::from_str(val.as_ref()).map_err::(Into::into)?; + let key = HeaderName::try_from(&k).map_err::(Into::into)?; + request.headers_mut().insert(key, value); + } + } + + client.send(request).await?; + Ok(()) +} + #[async_trait] impl opentelemetry::sdk::export::trace::SpanExporter for SpanExporter { - async fn export(&mut self, batch: Vec) -> ExportResult { + fn export( + &mut self, + batch: Vec, + ) -> futures::future::BoxFuture<'static, ExportResult> { match self { #[cfg(feature = "grpc-sys")] SpanExporter::Grpcio { @@ -427,11 +513,11 @@ impl opentelemetry::sdk::export::trace::SpanExporter for SpanExporter { call_options = call_options.headers(metadata_builder.build()); } - let receiver = trace_exporter - .export_async_opt(&request, call_options) - .map_err::(Into::into)?; - receiver.await.map_err::(Into::into)?; - Ok(()) + Box::pin(grpcio_send_request( + trace_exporter.clone(), + request, + call_options, + )) } #[cfg(feature = "grpc-tonic")] @@ -457,13 +543,7 @@ impl 
opentelemetry::sdk::export::trace::SpanExporter for SpanExporter { } } - trace_exporter - .to_owned() - .export(request) - .await - .map_err::(Into::into)?; - - Ok(()) + Box::pin(tonic_send_request(trace_exporter.to_owned(), request)) } #[cfg(feature = "http-proto")] @@ -473,36 +553,16 @@ impl opentelemetry::sdk::export::trace::SpanExporter for SpanExporter { headers, .. } => { - let req = ProstRequest { - resource_spans: batch.into_iter().map(Into::into).collect(), - }; - - let mut buf = vec![]; - req.encode(&mut buf) - .map_err::(Into::into)?; - - let mut request = http::Request::builder() - .method(Method::POST) - .uri(collector_endpoint.clone()) - .header(CONTENT_TYPE, "application/x-protobuf") - .body(buf) - .map_err::(Into::into)?; - - if let Some(headers) = headers.clone() { - for (k, val) in headers { - let value = HeaderValue::from_str(val.as_ref()) - .map_err::(Into::into)?; - let key = - HeaderName::try_from(&k).map_err::(Into::into)?; - request.headers_mut().insert(key, value); - } - } - - if let Some(client) = trace_exporter { - client.send(request).await?; - Ok(()) + if let Some(ref client) = trace_exporter { + let client = Arc::clone(client); + Box::pin(http_send_request( + batch, + client, + headers.clone(), + collector_endpoint.clone(), + )) } else { - Err(crate::Error::NoHttpClient.into()) + Box::pin(std::future::ready(Err(crate::Error::NoHttpClient.into()))) } } } diff --git a/opentelemetry-otlp/src/transform/metrics.rs b/opentelemetry-otlp/src/transform/metrics.rs index e0aaa418b0..093b96c206 100644 --- a/opentelemetry-otlp/src/transform/metrics.rs +++ b/opentelemetry-otlp/src/transform/metrics.rs @@ -14,6 +14,7 @@ pub(crate) mod tonic { ArrayAggregator, HistogramAggregator, LastValueAggregator, MinMaxSumCountAggregator, SumAggregator, }; + use opentelemetry::sdk::InstrumentationLibrary; use opentelemetry_proto::tonic::metrics::v1::DataPointFlags; use opentelemetry_proto::tonic::FromNumber; use opentelemetry_proto::tonic::{ @@ -28,7 +29,6 @@ pub(crate) mod tonic { use crate::to_nanos; use crate::transform::{CheckpointedMetrics, ResourceWrapper}; - use opentelemetry::sdk::InstrumentationLibrary; use std::collections::{BTreeMap, HashMap}; pub(crate) fn record_to_metric( @@ -212,14 +212,21 @@ pub(crate) mod tonic { resource_metrics: sink_map .into_iter() .map(|(resource, metric_map)| ResourceMetrics { + schema_url: resource + .schema_url() + .map(|s| s.to_string()) + .unwrap_or_default(), resource: Some(resource.into()), - schema_url: "".to_string(), // todo: replace with actual schema url. instrumentation_library_metrics: metric_map .into_iter() .map( |(instrumentation_library, metrics)| InstrumentationLibraryMetrics { + schema_url: instrumentation_library + .schema_url + .clone() + .unwrap_or_default() + .to_string(), instrumentation_library: Some(instrumentation_library.into()), - schema_url: "".to_string(), // todo: replace with actual schema url. metrics: metrics .into_iter() .map(|(_k, v)| v) @@ -385,7 +392,7 @@ mod tests { version: instrumentation_version.unwrap_or("").to_string(), }, ), - schema_url: "".to_string(), // todo: replace with actual schema url. + schema_url: "".to_string(), metrics: metrics .into_iter() .map(|(name, data_points)| get_metric_with_name(name, data_points)) @@ -394,7 +401,7 @@ mod tests { } ResourceMetrics { resource: Some(resource), - schema_url: "".to_string(), // todo: replace with actual schema url. 
+ schema_url: "".to_string(), instrumentation_library_metrics, } } @@ -410,7 +417,16 @@ mod tests { // If we changed the sink function to process the input in parallel, we will have to sort other vectors // like data points in Metrics. fn assert_resource_metrics(mut expect: ResourceMetrics, mut actual: ResourceMetrics) { - assert_eq!(expect.resource, actual.resource); + assert_eq!( + expect + .resource + .as_mut() + .map(|r| r.attributes.sort_by_key(|kv| kv.key.to_string())), + actual + .resource + .as_mut() + .map(|r| r.attributes.sort_by_key(|kv| kv.key.to_string())) + ); assert_eq!( expect.instrumentation_library_metrics.len(), actual.instrumentation_library_metrics.len() diff --git a/opentelemetry-otlp/src/transform/resource.rs b/opentelemetry-otlp/src/transform/resource.rs index 013ee840e0..d83816eb2e 100644 --- a/opentelemetry-otlp/src/transform/resource.rs +++ b/opentelemetry-otlp/src/transform/resource.rs @@ -25,6 +25,14 @@ impl PartialOrd for ResourceWrapper { } } +impl ResourceWrapper { + #[cfg(all(feature = "grpc-tonic", feature = "metrics"))] + // It's currently only used by metrics. Trace sets this in opentelemetry-proto. + pub(crate) fn schema_url(&self) -> Option<&str> { + self.0.schema_url() + } +} + #[cfg(feature = "grpc-tonic")] impl From<ResourceWrapper> for Resource { fn from(resource: ResourceWrapper) -> Self { diff --git a/opentelemetry-prometheus/Cargo.toml b/opentelemetry-prometheus/Cargo.toml index 5fd02a1de4..53296e74cd 100644 --- a/opentelemetry-prometheus/Cargo.toml +++ b/opentelemetry-prometheus/Cargo.toml @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry = { version = "0.18.0", path = "../opentelemetry", default-features = false, features = ["metrics"] } +opentelemetry = { version = "0.18", path = "../opentelemetry", default-features = false, features = ["metrics"] } prometheus = "0.13" protobuf = "2.14" diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index 3fbdc31a69..4f35b7e46f 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ -47,10 +47,10 @@ build-client = [] [dependencies] grpcio = { version = "0.9", optional = true } -tonic = { version = "0.6.2", optional = true } -prost = { version = "0.9", optional = true } +tonic = { version = "0.7.1", optional = true } +prost = { version = "0.10.1", optional = true } protobuf = { version = "2.18", optional = true } # todo: update to 3.0 so we have docs for generated types.
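Note on the metrics test helper above: `Resource` now stores its attributes in a `HashMap` (see the resource module changes later in this patch), so attribute order in the generated proto is no longer deterministic and the assertion sorts both sides by key before comparing. A minimal standalone sketch of that order-insensitive comparison, with a simplified `KeyValue` as a stand-in for the generated proto type:

#[derive(Debug, PartialEq)]
struct KeyValue {
    key: String,
    value: String,
}

// Sort both attribute lists by key before asserting equality, so two
// semantically equal resources compare equal regardless of map iteration order.
fn assert_same_attributes(mut expect: Vec<KeyValue>, mut actual: Vec<KeyValue>) {
    expect.sort_by_key(|kv| kv.key.clone());
    actual.sort_by_key(|kv| kv.key.clone());
    assert_eq!(expect, actual);
}

fn main() {
    let a = vec![KeyValue { key: "b".into(), value: "2".into() }, KeyValue { key: "a".into(), value: "1".into() }];
    let b = vec![KeyValue { key: "a".into(), value: "1".into() }, KeyValue { key: "b".into(), value: "2".into() }];
    assert_same_attributes(a, b); // passes despite different input order
}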
-opentelemetry = { version = "0.18.0", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry" } +opentelemetry = { version = "0.18", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry" } futures = { version = "0.3", default-features = false, features = ["std"] } futures-util = { version = "0.3", default-features = false, features = ["std"] } serde = { version = "1.0", optional = true } @@ -61,5 +61,5 @@ protobuf-codegen = { version = "2.16" } protoc-grpcio = { version = "3.0" } [build-dependencies] -tonic-build = { version = "0.6.2", optional = true } -prost-build = { version = "0.9", optional = true } +tonic-build = { version = "0.7.1", optional = true } +prost-build = { version = "0.10.1", optional = true } diff --git a/opentelemetry-proto/build.rs b/opentelemetry-proto/build.rs index 3211ecbb6a..ed795a21b4 100644 --- a/opentelemetry-proto/build.rs +++ b/opentelemetry-proto/build.rs @@ -11,7 +11,6 @@ fn main() -> Result<(), Error> { tonic_build::configure() .build_server(cfg!(feature = "build-server")) .build_client(cfg!(feature = "build-client")) - .format(false) .compile( &[ "src/proto/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto", diff --git a/opentelemetry-proto/src/transform/common.rs b/opentelemetry-proto/src/transform/common.rs index 590ee0e31b..adc55381aa 100644 --- a/opentelemetry-proto/src/transform/common.rs +++ b/opentelemetry-proto/src/transform/common.rs @@ -62,7 +62,7 @@ pub mod tonic { Value::Bool(val) => Some(any_value::Value::BoolValue(val)), Value::I64(val) => Some(any_value::Value::IntValue(val)), Value::F64(val) => Some(any_value::Value::DoubleValue(val)), - Value::String(val) => Some(any_value::Value::StringValue(val.into_owned())), + Value::String(val) => Some(any_value::Value::StringValue(val.to_string())), Value::Array(array) => Some(any_value::Value::ArrayValue(match array { Array::Bool(vals) => array_into_proto(vals), Array::I64(vals) => array_into_proto(vals), @@ -144,7 +144,7 @@ pub mod grpcio { Value::Bool(val) => any_value.set_bool_value(val), Value::I64(val) => any_value.set_int_value(val), Value::F64(val) => any_value.set_double_value(val), - Value::String(val) => any_value.set_string_value(val.into_owned()), + Value::String(val) => any_value.set_string_value(val.to_string()), Value::Array(array) => any_value.set_array_value(match array { Array::Bool(vals) => array_into_proto(vals), Array::I64(vals) => array_into_proto(vals), diff --git a/opentelemetry-proto/src/transform/traces.rs b/opentelemetry-proto/src/transform/traces.rs index 57a9c034bf..f8990b19ec 100644 --- a/opentelemetry-proto/src/transform/traces.rs +++ b/opentelemetry-proto/src/transform/traces.rs @@ -51,13 +51,14 @@ pub mod tonic { let span_kind: span::SpanKind = source_span.span_kind.into(); ResourceSpans { resource: Some(Resource { - attributes: resource_attributes( - source_span.resource.as_ref().map(AsRef::as_ref), - ) - .0, + attributes: resource_attributes(&source_span.resource).0, dropped_attributes_count: 0, }), - schema_url: "".to_string(), // todo: replace with actual schema url. 
+ schema_url: source_span + .resource + .schema_url() + .map(|url| url.to_string()) + .unwrap_or_default(), instrumentation_library_spans: vec![InstrumentationLibrarySpans { schema_url: source_span .instrumentation_lib @@ -109,14 +110,11 @@ pub mod tonic { } } - fn resource_attributes(resource: Option<&sdk::Resource>) -> Attributes { + fn resource_attributes(resource: &sdk::Resource) -> Attributes { resource - .map(|res| { - res.iter() - .map(|(k, v)| opentelemetry::KeyValue::new(k.clone(), v.clone())) - .collect::>() - }) - .unwrap_or_default() + .iter() + .map(|(k, v)| opentelemetry::KeyValue::new(k.clone(), v.clone())) + .collect::>() .into() } } @@ -172,10 +170,7 @@ pub mod grpcio { fn from(source_span: SpanData) -> Self { ResourceSpans { resource: SingularPtrField::from(Some(Resource { - attributes: resource_attributes( - source_span.resource.as_ref().map(AsRef::as_ref), - ) - .0, + attributes: resource_attributes(&source_span.resource).0, dropped_attributes_count: 0, ..Default::default() })), @@ -243,15 +238,11 @@ pub mod grpcio { } } - fn resource_attributes(resource: Option<&sdk::Resource>) -> Attributes { + fn resource_attributes(resource: &sdk::Resource) -> Attributes { resource - .map(|resource| { - resource - .iter() - .map(|(k, v)| opentelemetry::KeyValue::new(k.clone(), v.clone())) - .collect::>() - }) - .unwrap_or_default() + .iter() + .map(|(k, v)| opentelemetry::KeyValue::new(k.clone(), v.clone())) + .collect::>() .into() } } diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 20862cc9a7..71ffe751fb 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -1,26 +1,31 @@ [package] name = "opentelemetry-sdk" version = "0.18.0" +license = "Apache-2.0" edition = "2018" [dependencies] -opentelemetry-api = { version = "0.18.0", path = "../opentelemetry-api/" } +opentelemetry-api = { version = "0.18", path = "../opentelemetry-api/" } +opentelemetry-http = { version = "0.7.0", path = "../opentelemetry-http", optional = true } async-std = { version = "1.6", features = ["unstable"], optional = true } async-trait = { version = "0.1", optional = true } -dashmap = { version = "4.0.1", optional = true } +crossbeam-channel = { version = "0.5", optional = true } +dashmap = { version = "=5.1.0", optional = true } fnv = { version = "1.0", optional = true } futures-channel = "0.3" futures-executor = "0.3" -futures-util = { version = "0.3", default-features = false, features = ["std", "sink"] } -lazy_static = "1.4" +futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "async-await-macro"] } +once_cell = "1.10" percent-encoding = { version = "2.0", optional = true } pin-project = { version = "1.0.2", optional = true } rand = { version = "0.8", default-features = false, features = ["std", "std_rng"], optional = true } serde = { version = "1.0", features = ["derive", "rc"], optional = true } +serde_json = { version = "1", optional = true } thiserror = "1" +url = { version = "2.2", optional = true } tokio = { version = "1.0", default-features = false, features = ["rt", "time"], optional = true } tokio-stream = { version = "0.1", optional = true } -crossbeam-channel = { version = "0.5", optional = true } +http = { version = "0.2", optional = true } [package.metadata.docs.rs] all-features = true @@ -34,6 +39,7 @@ rand_distr = "0.4.0" [features] default = ["trace"] trace = ["opentelemetry-api/trace", "crossbeam-channel", "rand", "pin-project", "async-trait", "percent-encoding"] +jaeger_remote_sampler = ["trace", 
"opentelemetry-http", "http", "serde", "serde_json", "url"] metrics = ["opentelemetry-api/metrics", "dashmap", "fnv"] testing = ["opentelemetry-api/testing", "trace", "metrics", "rt-async-std", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"] rt-tokio = ["tokio", "tokio-stream"] diff --git a/opentelemetry-sdk/benches/batch_span_processor.rs b/opentelemetry-sdk/benches/batch_span_processor.rs index 1d872739cb..f4ef6e0ed0 100644 --- a/opentelemetry-sdk/benches/batch_span_processor.rs +++ b/opentelemetry-sdk/benches/batch_span_processor.rs @@ -6,6 +6,8 @@ use opentelemetry_sdk::export::trace::SpanData; use opentelemetry_sdk::runtime::Tokio; use opentelemetry_sdk::testing::trace::NoopSpanExporter; use opentelemetry_sdk::trace::{BatchSpanProcessor, EvictedHashMap, EvictedQueue, SpanProcessor}; +use opentelemetry_sdk::Resource; +use std::borrow::Cow; use std::sync::Arc; use std::time::SystemTime; use tokio::runtime::Runtime; @@ -30,7 +32,7 @@ fn get_span_data() -> Vec { events: EvictedQueue::new(12), links: EvictedQueue::new(12), status: Status::Unset, - resource: None, + resource: Cow::Owned(Resource::empty()), instrumentation_lib: Default::default(), }) .collect::>() diff --git a/opentelemetry-sdk/benches/trace.rs b/opentelemetry-sdk/benches/trace.rs index 70fbc6edd0..4d75e7493c 100644 --- a/opentelemetry-sdk/benches/trace.rs +++ b/opentelemetry-sdk/benches/trace.rs @@ -1,4 +1,5 @@ use criterion::{criterion_group, criterion_main, Criterion}; +use futures_util::future::BoxFuture; use opentelemetry_api::{ trace::{Span, Tracer, TracerProvider}, Key, KeyValue, @@ -100,10 +101,9 @@ fn insert_keys(mut map: sdktrace::EvictedHashMap, n: usize) { #[derive(Debug)] struct VoidExporter; -#[async_trait::async_trait] impl SpanExporter for VoidExporter { - async fn export(&mut self, _spans: Vec) -> ExportResult { - Ok(()) + fn export(&mut self, _spans: Vec) -> BoxFuture<'static, ExportResult> { + Box::pin(futures_util::future::ready(Ok(()))) } } diff --git a/opentelemetry-sdk/src/export/metrics/stdout.rs b/opentelemetry-sdk/src/export/metrics/stdout.rs index aaf1ae722d..4668c57dbb 100644 --- a/opentelemetry-sdk/src/export/metrics/stdout.rs +++ b/opentelemetry-sdk/src/export/metrics/stdout.rs @@ -94,7 +94,7 @@ where let encoded_inst_attributes = if !desc.instrumentation_name().is_empty() { let inst_attributes = AttributeSet::from_attributes(iter::once(KeyValue::new( "instrumentation.name", - desc.instrumentation_name().to_owned(), + desc.instrumentation_name(), ))); inst_attributes.encoded(Some(self.attribute_encoder.as_ref())) } else { diff --git a/opentelemetry-sdk/src/export/trace/mod.rs b/opentelemetry-sdk/src/export/trace/mod.rs index 5460afe017..25c09c32d7 100644 --- a/opentelemetry-sdk/src/export/trace/mod.rs +++ b/opentelemetry-sdk/src/export/trace/mod.rs @@ -1,9 +1,9 @@ //! Trace exporters -use async_trait::async_trait; +use crate::Resource; +use futures_util::future::BoxFuture; use opentelemetry_api::trace::{Event, Link, SpanContext, SpanId, SpanKind, Status, TraceError}; use std::borrow::Cow; use std::fmt::Debug; -use std::sync::Arc; use std::time::SystemTime; pub mod stdout; @@ -18,7 +18,6 @@ pub type ExportResult = Result<(), TraceError>; /// The goal of the interface is to minimize burden of implementation for /// protocol-dependent telemetry exporters. The protocol exporter is expected to /// be primarily a simple telemetry data encoder and transmitter. -#[async_trait] pub trait SpanExporter: Send + Debug { /// Exports a batch of readable spans. 
Protocol exporters that will /// implement this function are typically expected to serialize and transmit @@ -32,7 +31,7 @@ pub trait SpanExporter: Send + Debug { /// /// Any retry logic that is required by the exporter is the responsibility /// of the exporter. - async fn export(&mut self, batch: Vec) -> ExportResult; + fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult>; /// Shuts down the exporter. Called when SDK is shut down. This is an /// opportunity for exporter to do any cleanup required. @@ -73,7 +72,7 @@ pub struct SpanData { /// Span status pub status: Status, /// Resource contains attributes representing an entity that produced this span. - pub resource: Option>, + pub resource: Cow<'static, Resource>, /// Instrumentation library that produced this span pub instrumentation_lib: crate::InstrumentationLibrary, } diff --git a/opentelemetry-sdk/src/export/trace/stdout.rs b/opentelemetry-sdk/src/export/trace/stdout.rs index c88c897ee7..68bd0b172c 100644 --- a/opentelemetry-sdk/src/export/trace/stdout.rs +++ b/opentelemetry-sdk/src/export/trace/stdout.rs @@ -32,6 +32,7 @@ use crate::export::{ ExportError, }; use async_trait::async_trait; +use futures_util::future::BoxFuture; use opentelemetry_api::{global, trace::TracerProvider}; use std::fmt::Debug; use std::io::{stdout, Stdout, Write}; @@ -133,20 +134,26 @@ where W: Write + Debug + Send + 'static, { /// Export spans to stdout - async fn export(&mut self, batch: Vec) -> ExportResult { + fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { for span in batch { if self.pretty_print { - self.writer + if let Err(err) = self + .writer .write_all(format!("{:#?}\n", span).as_bytes()) - .map_err(|err| TraceError::ExportFailed(Box::new(Error::from(err))))?; - } else { - self.writer - .write_all(format!("{:?}\n", span).as_bytes()) - .map_err(|err| TraceError::ExportFailed(Box::new(Error::from(err))))?; + .map_err(|err| TraceError::ExportFailed(Box::new(Error::from(err)))) + { + return Box::pin(std::future::ready(Err(Into::into(err)))); + } + } else if let Err(err) = self + .writer + .write_all(format!("{:?}\n", span).as_bytes()) + .map_err(|err| TraceError::ExportFailed(Box::new(Error::from(err)))) + { + return Box::pin(std::future::ready(Err(Into::into(err)))); } } - Ok(()) + Box::pin(std::future::ready(Ok(()))) } } diff --git a/opentelemetry-sdk/src/metrics/controllers/push.rs b/opentelemetry-sdk/src/metrics/controllers/push.rs index 39a83e795c..f74c522060 100644 --- a/opentelemetry-sdk/src/metrics/controllers/push.rs +++ b/opentelemetry-sdk/src/metrics/controllers/push.rs @@ -19,9 +19,7 @@ use std::pin::Pin; use std::sync::{Arc, Mutex}; use std::time; -lazy_static::lazy_static! { - static ref DEFAULT_PUSH_PERIOD: time::Duration = time::Duration::from_secs(10); -} +const DEFAULT_PUSH_PERIOD: time::Duration = time::Duration::from_secs(10); /// Create a new `PushControllerBuilder`. 
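The `SpanExporter` change above drops `#[async_trait]` in favor of a plain method returning an owned `BoxFuture<'static, _>`: implementations do their fallible work up front and hand back an already-resolved future, which is why the stdout exporter now early-returns a ready future on write errors. A minimal sketch of the new trait shape, assuming the `futures-util` crate, with `String` standing in for `SpanData` and a simplified result alias:

use futures_util::future::BoxFuture;

type ExportResult = Result<(), String>; // stand-in for the SDK's TraceError-based alias

trait SpanExporter: Send {
    // The returned future owns its data ('static), so it cannot borrow from `self`;
    // implementations resolve eagerly and wrap the outcome.
    fn export(&mut self, batch: Vec<String>) -> BoxFuture<'static, ExportResult>;
}

#[derive(Debug)]
struct NoopExporter;

impl SpanExporter for NoopExporter {
    fn export(&mut self, _batch: Vec<String>) -> BoxFuture<'static, ExportResult> {
        Box::pin(std::future::ready(Ok(())))
    }
}

fn main() {
    let mut exporter = NoopExporter;
    let _fut = exporter.export(vec!["span".to_owned()]); // already resolved
}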
pub fn push( @@ -183,14 +181,14 @@ where let (message_sender, message_receiver) = mpsc::channel(256); let ticker = - (self.interval)(self.period.unwrap_or(*DEFAULT_PUSH_PERIOD)).map(|_| PushMessage::Tick); + (self.interval)(self.period.unwrap_or(DEFAULT_PUSH_PERIOD)).map(|_| PushMessage::Tick); (self.spawn)(PushControllerWorker { messages: Box::pin(select(message_receiver, ticker)), accumulator, processor, exporter: self.exporter, - _timeout: self.timeout.unwrap_or(*DEFAULT_PUSH_PERIOD), + _timeout: self.timeout.unwrap_or(DEFAULT_PUSH_PERIOD), }); PushController { diff --git a/opentelemetry-sdk/src/propagation/baggage.rs b/opentelemetry-sdk/src/propagation/baggage.rs index 8b16c8eff1..6fd6600c2f 100644 --- a/opentelemetry-sdk/src/propagation/baggage.rs +++ b/opentelemetry-sdk/src/propagation/baggage.rs @@ -40,6 +40,7 @@ //! assert!(header_value.contains("user_id=1"), "still contains previous name-value"); //! assert!(header_value.contains("server_id=42"), "contains new name-value pair"); //! ``` +use once_cell::sync::Lazy; use opentelemetry_api::{ baggage::{BaggageExt, KeyValueMetadata}, propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, @@ -50,10 +51,7 @@ use std::iter; static BAGGAGE_HEADER: &str = "baggage"; const FRAGMENT: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b';').add(b',').add(b'='); - -lazy_static::lazy_static! { - static ref BAGGAGE_FIELDS: [String; 1] = [BAGGAGE_HEADER.to_string()]; -} +static BAGGAGE_FIELDS: Lazy<[String; 1]> = Lazy::new(|| [BAGGAGE_HEADER.to_owned()]); /// Propagates name-value pairs in [W3C Baggage] format. /// @@ -184,9 +182,8 @@ impl TextMapPropagator for BaggagePropagator { mod tests { use super::*; use opentelemetry_api::{ - baggage::BaggageMetadata, propagation::TextMapPropagator, Key, KeyValue, Value, + baggage::BaggageMetadata, propagation::TextMapPropagator, Key, KeyValue, StringValue, Value, }; - use std::borrow::Cow; use std::collections::HashMap; #[rustfmt::skip] @@ -247,7 +244,7 @@ mod tests { vec![ KeyValue::new("key1", Value::Array(vec![true, false].into())), KeyValue::new("key2", Value::Array(vec![123, 456].into())), - KeyValue::new("key3", Value::Array(vec![Cow::from("val1"), Cow::from("val2")].into())), + KeyValue::new("key3", Value::Array(vec![StringValue::from("val1"), StringValue::from("val2")].into())), ], vec![ "key1=[true%2Cfalse]", diff --git a/opentelemetry-sdk/src/propagation/trace_context.rs b/opentelemetry-sdk/src/propagation/trace_context.rs index a27e0108c5..9ada5a3168 100644 --- a/opentelemetry-sdk/src/propagation/trace_context.rs +++ b/opentelemetry-sdk/src/propagation/trace_context.rs @@ -17,6 +17,7 @@ //! See the [w3c trace-context docs] for more details. //! //! [w3c trace-context docs]: https://w3c.github.io/trace-context/ +use once_cell::sync::Lazy; use opentelemetry_api::{ propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, @@ -29,12 +30,8 @@ const MAX_VERSION: u8 = 254; const TRACEPARENT_HEADER: &str = "traceparent"; const TRACESTATE_HEADER: &str = "tracestate"; -lazy_static::lazy_static! { - static ref TRACE_CONTEXT_HEADER_FIELDS: [String; 2] = [ - TRACEPARENT_HEADER.to_string(), - TRACESTATE_HEADER.to_string() - ]; -} +static TRACE_CONTEXT_HEADER_FIELDS: Lazy<[String; 2]> = + Lazy::new(|| [TRACEPARENT_HEADER.to_owned(), TRACESTATE_HEADER.to_owned()]); /// Propagates `SpanContext`s in [W3C TraceContext] format. 
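Both propagators in the hunks above replace the `lazy_static!` macro with `once_cell::sync::Lazy`: the closure runs once on first dereference and the result is cached for the life of the process. A standalone sketch of the pattern, assuming the `once_cell` crate:

use once_cell::sync::Lazy;

static HEADER_FIELDS: Lazy<[String; 2]> =
    Lazy::new(|| ["traceparent".to_owned(), "tracestate".to_owned()]);

fn main() {
    // First access initializes the array; later accesses reuse the cached value.
    for field in HEADER_FIELDS.iter() {
        println!("{}", field);
    }
}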
/// diff --git a/opentelemetry-sdk/src/resource/mod.rs b/opentelemetry-sdk/src/resource/mod.rs index 48e1b0ee50..b443773059 100644 --- a/opentelemetry-sdk/src/resource/mod.rs +++ b/opentelemetry-sdk/src/resource/mod.rs @@ -30,21 +30,26 @@ pub use process::ProcessResourceDetector; #[cfg(feature = "metrics")] use opentelemetry_api::attributes; use opentelemetry_api::{Key, KeyValue, Value}; -use std::collections::{btree_map, BTreeMap}; +use std::borrow::Cow; +use std::collections::{hash_map, HashMap}; use std::ops::Deref; use std::time::Duration; /// An immutable representation of the entity producing telemetry as attributes. #[derive(Clone, Debug, PartialEq)] pub struct Resource { - attrs: BTreeMap<Key, Value>, + attrs: HashMap<Key, Value>, + schema_url: Option<Cow<'static, str>>, } impl Default for Resource { fn default() -> Self { Self::from_detectors( Duration::from_secs(0), - vec![Box::new(EnvResourceDetector::new())], + vec![ + Box::new(SdkProvidedResourceDetector), + Box::new(EnvResourceDetector::new()), + ], ) } } @@ -54,6 +59,7 @@ impl Resource { pub fn empty() -> Self { Self { attrs: Default::default(), + schema_url: None, } } @@ -71,6 +77,24 @@ impl Resource { resource } + /// Create a new `Resource` from key value pairs and a [schema url]. + /// + /// Values are de-duplicated by key, and the first key-value pair with a non-empty string value + /// will be retained. + /// + /// schema_url must be a valid URL using the HTTP or HTTPS protocol. + /// + /// [schema url]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url + pub fn from_schema_url<KV, S>(kvs: KV, schema_url: S) -> Self + where + KV: IntoIterator<Item = KeyValue>, + S: Into<Cow<'static, str>>, + { + let mut resource = Self::new(kvs); + resource.schema_url = Some(schema_url.into()); + resource + } + /// Create a new `Resource` from resource detectors. /// /// timeout will be applied to each detector. @@ -89,8 +113,19 @@ impl Resource { /// Create a new `Resource` by combining two resources. /// + /// ### Key value pairs /// Keys from the `other` resource have priority over keys from this resource, even if the /// updated value is empty. + /// + /// ### [Schema url] + /// If both resources are non-empty, the schema url is determined by the following rules, in order: + /// 1. If both resources have the same schema url, it will be used. + /// 2. If this resource has a schema url and the other does not, this resource's schema url will be used. + /// 3. If the other resource has a schema url and this one does not, the other's schema url will be used. + /// 4. If both resources have different schema urls, the merged resource will have no schema url. + /// 5. If neither resource has a schema url, the merged resource will have no schema url. + /// + /// [Schema url]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url pub fn merge<T: Deref<Target = Self>>(&self, other: T) -> Self { if self.attrs.is_empty() { return other.clone(); } @@ -109,9 +144,31 @@ impl Resource { resource.attrs.insert(k.clone(), v.clone()); } + if self.schema_url == other.schema_url { + resource.schema_url = self.schema_url.clone(); + } else if self.schema_url.is_none() { + // if the other resource has a schema url, use it. + if other.schema_url.is_some() { + resource.schema_url = other.schema_url.clone(); + } + // else leave the schema url empty. + } else { + // if only self has a schema url, use it. + if other.schema_url.is_none() { + resource.schema_url = self.schema_url.clone(); + } + } + resource } + /// Return the [schema url] of the resource.
If the resource does not have a schema url, return `None`. + /// + /// [schema url]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url + pub fn schema_url(&self) -> Option<&str> { + self.schema_url.as_ref().map(|s| s.as_ref()) + } + /// Returns the number of attributes for this resource pub fn len(&self) -> usize { self.attrs.len() @@ -142,7 +199,7 @@ impl Resource { /// An owned iterator over the entries of a `Resource`. #[derive(Debug)] -pub struct IntoIter(btree_map::IntoIter); +pub struct IntoIter(hash_map::IntoIter); impl Iterator for IntoIter { type Item = (Key, Value); @@ -163,7 +220,7 @@ impl IntoIterator for Resource { /// An iterator over the entries of a `Resource`. #[derive(Debug)] -pub struct Iter<'a>(btree_map::Iter<'a, Key, Value>); +pub struct Iter<'a>(hash_map::Iter<'a, Key, Value>); impl<'a> Iterator for Iter<'a> { type Item = (&'a Key, &'a Value); @@ -202,26 +259,27 @@ pub trait ResourceDetector { mod tests { use super::*; use crate::resource::EnvResourceDetector; - use std::collections::BTreeMap; + use std::collections::HashMap; use std::{env, time}; #[test] fn new_resource() { let args_with_dupe_keys = vec![KeyValue::new("a", ""), KeyValue::new("a", "final")]; - let mut expected_attrs = BTreeMap::new(); + let mut expected_attrs = HashMap::new(); expected_attrs.insert(Key::new("a"), Value::from("final")); assert_eq!( Resource::new(args_with_dupe_keys), Resource { - attrs: expected_attrs + attrs: expected_attrs, + schema_url: None, } ); } #[test] - fn merge_resource() { + fn merge_resource_key_value_pairs() { let resource_a = Resource::new(vec![ KeyValue::new("a", ""), KeyValue::new("b", "b-value"), @@ -234,7 +292,7 @@ mod tests { KeyValue::new("d", ""), ]); - let mut expected_attrs = BTreeMap::new(); + let mut expected_attrs = HashMap::new(); expected_attrs.insert(Key::new("a"), Value::from("a-value")); expected_attrs.insert(Key::new("b"), Value::from("b-value")); expected_attrs.insert(Key::new("c"), Value::from("c-value")); @@ -243,11 +301,47 @@ mod tests { assert_eq!( resource_a.merge(&resource_b), Resource { - attrs: expected_attrs + attrs: expected_attrs, + schema_url: None, } ); } + #[test] + fn merge_resource_schema_url() { + // if both resources contains key value pairs + let test_cases = vec![ + (Some("http://schema/a"), None, Some("http://schema/a")), + (Some("http://schema/a"), Some("http://schema/b"), None), + (None, Some("http://schema/b"), Some("http://schema/b")), + ( + Some("http://schema/a"), + Some("http://schema/a"), + Some("http://schema/a"), + ), + (None, None, None), + ]; + + for (schema_url, other_schema_url, expect_schema_url) in test_cases.into_iter() { + let mut resource = Resource::new(vec![KeyValue::new("key", "")]); + resource.schema_url = schema_url.map(Into::into); + + let mut other_resource = Resource::new(vec![KeyValue::new("key", "")]); + other_resource.schema_url = other_schema_url.map(Into::into); + + assert_eq!( + resource.merge(&other_resource).schema_url, + expect_schema_url.map(Into::into) + ); + } + + // if only one resource contains key value pairs + let resource = Resource::from_schema_url(vec![], "http://schema/a"); + let other_resource = Resource::new(vec![KeyValue::new("key", "")]); + + assert_eq!(resource.merge(&other_resource).schema_url, None); + } + #[test] fn detect_resource() { env::set_var("OTEL_RESOURCE_ATTRIBUTES", "key=value, k = v , a= x, a=z"); @@ -262,7 +356,7 @@ mod tests { KeyValue::new("key", "value"), KeyValue::new("k", "v"), 
KeyValue::new("a", "x"), - KeyValue::new("a", "z") + KeyValue::new("a", "z"), ]) ) } diff --git a/opentelemetry-sdk/src/resource/process.rs b/opentelemetry-sdk/src/resource/process.rs index 5d74a73a81..1c1a8d7154 100644 --- a/opentelemetry-sdk/src/resource/process.rs +++ b/opentelemetry-sdk/src/resource/process.rs @@ -4,8 +4,7 @@ use crate::resource::ResourceDetector; use crate::Resource; -use opentelemetry_api::{Array, KeyValue, Value}; -use std::borrow::Cow; +use opentelemetry_api::{KeyValue, StringValue, Value}; use std::env::args_os; use std::process::id; use std::time::Duration; @@ -25,13 +24,10 @@ impl ResourceDetector for ProcessResourceDetector { let arguments = args_os(); let cmd_arg_val = arguments .into_iter() - .map(|arg| Cow::from(arg.to_string_lossy().into_owned())) - .collect::>>(); + .map(|arg| arg.to_string_lossy().into_owned().into()) + .collect::>(); Resource::new(vec![ - KeyValue::new( - "process.command_args", - Value::Array(Array::String(cmd_arg_val)), - ), + KeyValue::new("process.command_args", Value::Array(cmd_arg_val.into())), KeyValue::new("process.pid", id() as i64), ]) } diff --git a/opentelemetry-sdk/src/runtime.rs b/opentelemetry-sdk/src/runtime.rs index c958d19245..9f1e3b71b1 100644 --- a/opentelemetry-sdk/src/runtime.rs +++ b/opentelemetry-sdk/src/runtime.rs @@ -21,7 +21,7 @@ pub trait Runtime: Clone + Send + Sync + 'static { /// A future, which resolves after a previously specified amount of time. The output type is /// not important. - type Delay: Future + Send; + type Delay: Future + Send + Unpin; /// Create a [Stream][futures_util::stream::Stream], which returns a new item every /// [Duration][std::time::Duration]. @@ -52,7 +52,7 @@ pub struct Tokio; #[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio")))] impl Runtime for Tokio { type Interval = tokio_stream::wrappers::IntervalStream; - type Delay = tokio::time::Sleep; + type Delay = ::std::pin::Pin>; fn interval(&self, duration: Duration) -> Self::Interval { crate::util::tokio_interval_stream(duration) @@ -63,7 +63,7 @@ impl Runtime for Tokio { } fn delay(&self, duration: Duration) -> Self::Delay { - tokio::time::sleep(duration) + Box::pin(tokio::time::sleep(duration)) } } @@ -77,7 +77,7 @@ pub struct TokioCurrentThread; #[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio-current-thread")))] impl Runtime for TokioCurrentThread { type Interval = tokio_stream::wrappers::IntervalStream; - type Delay = tokio::time::Sleep; + type Delay = ::std::pin::Pin>; fn interval(&self, duration: Duration) -> Self::Interval { crate::util::tokio_interval_stream(duration) @@ -100,7 +100,7 @@ impl Runtime for TokioCurrentThread { } fn delay(&self, duration: Duration) -> Self::Delay { - tokio::time::sleep(duration) + Box::pin(tokio::time::sleep(duration)) } } diff --git a/opentelemetry-sdk/src/testing/trace.rs b/opentelemetry-sdk/src/testing/trace.rs index 03781fe66d..5d5b338a9b 100644 --- a/opentelemetry-sdk/src/testing/trace.rs +++ b/opentelemetry-sdk/src/testing/trace.rs @@ -7,6 +7,7 @@ use crate::{ InstrumentationLibrary, }; use async_trait::async_trait; +use futures_util::future::BoxFuture; pub use opentelemetry_api::testing::trace::TestSpan; use opentelemetry_api::trace::{SpanContext, SpanId, SpanKind, Status}; use std::fmt::{Display, Formatter}; @@ -38,13 +39,17 @@ pub struct TestSpanExporter { #[async_trait] impl SpanExporter for TestSpanExporter { - async fn export(&mut self, batch: Vec) -> ExportResult { + fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { for span_data in batch { - self.tx_export + if 
let Err(err) = self + .tx_export .send(span_data) - .map_err::(Into::into)?; + .map_err::(Into::into) + { + return Box::pin(std::future::ready(Err(Into::into(err)))); + } } - Ok(()) + Box::pin(std::future::ready(Ok(()))) } fn shutdown(&mut self) { @@ -68,15 +73,18 @@ pub struct TokioSpanExporter { tx_shutdown: tokio::sync::mpsc::UnboundedSender<()>, } -#[async_trait] impl SpanExporter for TokioSpanExporter { - async fn export(&mut self, batch: Vec) -> ExportResult { + fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { for span_data in batch { - self.tx_export + if let Err(err) = self + .tx_export .send(span_data) - .map_err::(Into::into)?; + .map_err::(Into::into) + { + return Box::pin(std::future::ready(Err(Into::into(err)))); + } } - Ok(()) + Box::pin(std::future::ready(Ok(()))) } fn shutdown(&mut self) { @@ -144,7 +152,7 @@ impl NoopSpanExporter { #[async_trait::async_trait] impl SpanExporter for NoopSpanExporter { - async fn export(&mut self, _batch: Vec) -> ExportResult { - Ok(()) + fn export(&mut self, _: Vec) -> BoxFuture<'static, ExportResult> { + Box::pin(std::future::ready(Ok(()))) } } diff --git a/opentelemetry-sdk/src/trace/config.rs b/opentelemetry-sdk/src/trace/config.rs index c4a7f07aaf..65a8daee23 100644 --- a/opentelemetry-sdk/src/trace/config.rs +++ b/opentelemetry-sdk/src/trace/config.rs @@ -3,10 +3,11 @@ //! Configuration represents the global tracing configuration, overrides //! can be set for the default OpenTelemetry limits and Sampler. use crate::trace::{span_limit::SpanLimits, IdGenerator, RandomIdGenerator, Sampler, ShouldSample}; +use crate::Resource; use opentelemetry_api::global::{handle_error, Error}; +use std::borrow::Cow; use std::env; use std::str::FromStr; -use std::sync::Arc; /// Default trace configuration pub fn config() -> Config { @@ -18,12 +19,15 @@ pub fn config() -> Config { pub struct Config { /// The sampler that the sdk should use pub sampler: Box, + /// The id generator that the sdk should use pub id_generator: Box, + /// span limits pub span_limits: SpanLimits, + /// Contains attributes representing an entity that produces telemetry. - pub resource: Option>, + pub resource: Cow<'static, Resource>, } impl Config { @@ -76,20 +80,10 @@ impl Config { } /// Specify the attributes representing the entity that produces telemetry - pub fn with_resource(mut self, resource: crate::Resource) -> Self { - self.resource = Some(Arc::new(resource)); + pub fn with_resource(mut self, resource: Resource) -> Self { + self.resource = Cow::Owned(resource); self } - - /// Use empty resource instead of default resource in this config. - /// - /// Usually if no resource is provided, SDK will assign a default resource - /// to the `TracerProvider`, which could impact the performance. Performance - /// sensitive application can use function to disable such behavior and assign - /// no resource to `TracerProvider`. 
- pub fn with_no_resource(self) -> Self { - self.with_resource(crate::Resource::empty()) - } } impl Default for Config { @@ -99,7 +93,7 @@ impl Default for Config { sampler: Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))), id_generator: Box::new(RandomIdGenerator::default()), span_limits: SpanLimits::default(), - resource: None, + resource: Cow::Owned(Resource::default()), }; if let Some(max_attributes_per_span) = env::var("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT") diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index 520f64f0c1..4daa6cc1d2 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -32,3 +32,6 @@ pub use span_processor::{ SpanProcessor, }; pub use tracer::Tracer; + +#[cfg(feature = "jaeger_remote_sampler")] +pub use sampler::JaegerRemoteSamplerBuilder; diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 824fe43c6e..5a5736271c 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -8,17 +8,17 @@ //! propagators) are provided by the `TracerProvider`. `Tracer` instances do //! not duplicate this data to avoid that different `Tracer` instances //! of the `TracerProvider` have different versions of these data. -use crate::resource::{EnvResourceDetector, SdkProvidedResourceDetector}; use crate::trace::{runtime::TraceRuntime, BatchSpanProcessor, SimpleSpanProcessor, Tracer}; use crate::{export::trace::SpanExporter, trace::SpanProcessor}; use crate::{InstrumentationLibrary, Resource}; +use once_cell::sync::OnceCell; use opentelemetry_api::{global, trace::TraceResult}; use std::borrow::Cow; use std::sync::Arc; -use std::time::Duration; /// Default tracer name if empty string is provided. const DEFAULT_COMPONENT_NAME: &str = "rust.opentelemetry.io/sdk/tracer"; +static PROVIDER_RESOURCE: OnceCell = OnceCell::new(); /// TracerProvider inner type #[derive(Debug)] @@ -144,27 +144,10 @@ impl opentelemetry_api::trace::TracerProvider for TracerProvider { } /// Builder for provider attributes. -#[derive(Debug)] +#[derive(Debug, Default)] pub struct Builder { processors: Vec>, config: crate::trace::Config, - sdk_provided_resource: Resource, -} - -impl Default for Builder { - fn default() -> Self { - Builder { - processors: Default::default(), - config: Default::default(), - sdk_provided_resource: Resource::from_detectors( - Duration::from_secs(0), - vec![ - Box::new(SdkProvidedResourceDetector), - Box::new(EnvResourceDetector::new()), - ], - ), - } - } } impl Builder { @@ -199,28 +182,29 @@ impl Builder { Builder { config, ..self } } - /// Return the clone of sdk provided resource. - /// - /// See - /// for details. - pub fn sdk_provided_resource(&self) -> Resource { - self.sdk_provided_resource.clone() - } - /// Create a new provider from this configuration. pub fn build(self) -> TracerProvider { let mut config = self.config; - config.resource = match config.resource { - None => Some(Arc::new(self.sdk_provided_resource)), - // User provided resource information has higher priority. - Some(resource) => { - if resource.is_empty() { - None - } else { - Some(Arc::new(self.sdk_provided_resource.merge(resource))) + + // Standard config will contain an owned `Resource` (either sdk default or use supplied) + // we can optimize the common case with a static ref to avoid cloning the underlying + // resource data for each span. 
+ // + // For the uncommon case where there are multiple tracer providers with different resource + // configurations, users can optionally provide their own borrowed static resource. + if matches!(config.resource, Cow::Owned(_)) { + config.resource = match PROVIDER_RESOURCE.try_insert(config.resource.into_owned()) { + Ok(static_resource) => Cow::Borrowed(static_resource), + Err((prev, new)) => { + if prev == &new { + Cow::Borrowed(prev) + } else { + Cow::Owned(new) + } } } - }; + } + TracerProvider { inner: Arc::new(TracerProviderInner { processors: self.processors, @@ -238,6 +222,7 @@ mod tests { use crate::Resource; use opentelemetry_api::trace::{TraceError, TraceResult}; use opentelemetry_api::{Context, Key, KeyValue}; + use std::borrow::Cow; use std::env; use std::sync::Arc; @@ -288,9 +273,11 @@ mod tests { let assert_service_name = |provider: super::TracerProvider, expect: Option<&'static str>| { assert_eq!( - provider.config().resource.as_ref().and_then(|r| r + provider + .config() + .resource .get(Key::from_static_str("service.name")) - .map(|v| v.to_string())), + .map(|v| v.to_string()), expect.map(|s| s.to_string()) ); }; @@ -300,10 +287,10 @@ mod tests { // If user didn't provided a resource, try to get a default from env var let custom_config_provider = super::TracerProvider::builder() .with_config(Config { - resource: Some(Arc::new(Resource::new(vec![KeyValue::new( + resource: Cow::Owned(Resource::new(vec![KeyValue::new( "service.name", "test_service", - )]))), + )])), ..Default::default() }) .build(); @@ -314,11 +301,11 @@ mod tests { let env_resource_provider = super::TracerProvider::builder().build(); assert_eq!( env_resource_provider.config().resource, - Some(Arc::new(Resource::new(vec![ + Cow::Owned(Resource::new(vec![ KeyValue::new("key1", "value1"), KeyValue::new("k3", "value2"), KeyValue::new("service.name", "unknown_service"), - ]))) + ])) ); // When `OTEL_RESOURCE_ATTRIBUTES` is set and also user provided config @@ -328,27 +315,26 @@ mod tests { ); let user_provided_resource_config_provider = super::TracerProvider::builder() .with_config(Config { - resource: Some(Arc::new(Resource::new(vec![KeyValue::new( - "my-custom-key", - "my-custom-value", - )]))), + resource: Cow::Owned(Resource::default().merge(&mut Resource::new(vec![ + KeyValue::new("my-custom-key", "my-custom-value"), + ]))), ..Default::default() }) .build(); assert_eq!( user_provided_resource_config_provider.config().resource, - Some(Arc::new(Resource::new(vec![ + Cow::Owned(Resource::new(vec![ KeyValue::new("my-custom-key", "my-custom-value"), KeyValue::new("k2", "value2"), KeyValue::new("service.name", "unknown_service"), - ]))) + ])) ); env::remove_var("OTEL_RESOURCE_ATTRIBUTES"); // If user provided a resource, it takes priority during collision. let no_service_name = super::TracerProvider::builder() .with_config(Config { - resource: Some(Arc::new(Resource::empty())), + resource: Cow::Owned(Resource::empty()), ..Default::default() }) .build(); diff --git a/opentelemetry-sdk/src/trace/sampler.rs b/opentelemetry-sdk/src/trace/sampler.rs index 8928632584..d4530f31ab 100644 --- a/opentelemetry-sdk/src/trace/sampler.rs +++ b/opentelemetry-sdk/src/trace/sampler.rs @@ -38,14 +38,25 @@ //! MUST NOT allow this combination. 
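The builder change above memoizes the first owned resource in a process-wide `OnceCell` so every span can hold a cheap borrowed `Cow` instead of cloning the resource data. A condensed sketch of that `try_insert` flow, assuming `once_cell` 1.10+, with `String` standing in for `Resource`:

use once_cell::sync::OnceCell;
use std::borrow::Cow;

static PROVIDER_RESOURCE: OnceCell<String> = OnceCell::new();

fn intern(resource: Cow<'static, str>) -> Cow<'static, str> {
    match PROVIDER_RESOURCE.try_insert(resource.into_owned()) {
        // First provider in the process: the cell owns the value, we borrow it.
        Ok(interned) => Cow::Borrowed(interned.as_str()),
        // The cell was already initialized (e.g. by another provider);
        // borrow only if the cached value matches, otherwise keep owning ours.
        Err((prev, new)) => {
            if prev == &new {
                Cow::Borrowed(prev.as_str())
            } else {
                Cow::Owned(new)
            }
        }
    }
}

fn main() {
    let first = intern(Cow::Owned("service-a".to_owned()));
    let second = intern(Cow::Owned("service-b".to_owned()));
    assert!(matches!(first, Cow::Borrowed(_)));
    assert!(matches!(second, Cow::Owned(_)));
}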
use crate::InstrumentationLibrary; +use opentelemetry_api::trace::OrderMap; use opentelemetry_api::{ trace::{ Link, SamplingDecision, SamplingResult, SpanKind, TraceContextExt, TraceId, TraceState, }, - Context, KeyValue, + Context, Key, Value, }; use std::convert::TryInto; +#[cfg(feature = "jaeger_remote_sampler")] +mod jaeger_remote; + +#[cfg(feature = "jaeger_remote_sampler")] +use jaeger_remote::JaegerRemoteSampler; +#[cfg(feature = "jaeger_remote_sampler")] +pub use jaeger_remote::JaegerRemoteSamplerBuilder; +#[cfg(feature = "jaeger_remote_sampler")] +use opentelemetry_http::HttpClient; + /// The `ShouldSample` interface allows implementations to provide samplers /// which will return a sampling `SamplingResult` based on information that /// is typically available just before the `Span` was created. @@ -58,14 +69,15 @@ pub trait ShouldSample: Send + Sync + std::fmt::Debug { trace_id: TraceId, name: &str, span_kind: &SpanKind, - attributes: &[KeyValue], + attributes: &OrderMap<Key, Value>, links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult; } -/// Sampling options -#[derive(Clone, Debug, PartialEq)] +/// Built-in samplers. +#[derive(Clone, Debug)] +#[non_exhaustive] pub enum Sampler { /// Always sample the trace AlwaysOn, /// Never sample the trace AlwaysOff, /// Respect the parent span's sampling decision or delegate a delegate sampler for root spans. ParentBased(Box<Sampler>), /// Sample a given fraction of traces. Fractions >= 1 will always sample. If the parent span is /// sampled, then it's child spans will automatically be sampled. Fractions < 0 are treated as /// zero, but spans may still be sampled if their parent is. TraceIdRatioBased(f64), + /// Jaeger remote sampler supports any remote service that implements the jaeger remote sampling protocol. + /// The proto definition can be found [here](https://github.com/jaegertracing/jaeger-idl/blob/main/proto/api_v2/sampling.proto) + /// + /// Jaeger remote sampler allows remotely controlling the sampling configuration for the SDKs. + /// The sampling is typically configured at the collector and the SDKs actively poll for changes. + /// The sampler uses a TraceIdRatioBased or a rate-limited sampler under the hood. + /// These samplers can be configured per whole service (a.k.a. default), or per span name in a + /// given service (a.k.a. per operation). + #[cfg(feature = "jaeger_remote_sampler")] + JaegerRemote(JaegerRemoteSampler), +} + +impl Sampler { + /// Create a jaeger remote sampler. + /// + /// The user needs to provide: + /// - a `runtime` to run the http client + /// - an http client to query the sampling endpoint + /// - a default sampler to make sampling decisions when the remote is unavailable or before the SDK receives the first response, + /// - the service name. This is a required parameter to query the sampling endpoint. + /// + /// See [here](https://github.com/open-telemetry/opentelemetry-rust/blob/main/examples/jaeger-remote-sampler/src/main.rs) for an example. + #[cfg(feature = "jaeger_remote_sampler")] + pub fn jaeger_remote( + runtime: R, + http_client: C, + default_sampler: Sampler, + service_name: Svc, + ) -> JaegerRemoteSamplerBuilder + where + C: HttpClient + 'static, + Sampler: ShouldSample, + R: crate::trace::TraceRuntime, + Svc: Into<String>, + { + JaegerRemoteSamplerBuilder::new(runtime, http_client, default_sampler, service_name) + } } impl ShouldSample for Sampler { @@ -86,7 +135,7 @@ impl ShouldSample for Sampler { trace_id: TraceId, name: &str, span_kind: &SpanKind, - attributes: &[KeyValue], + attributes: &OrderMap<Key, Value>, links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult { @@ -121,27 +170,22 @@ impl ShouldSample for Sampler { ) } // Probabilistically sample the trace.
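The probabilistic arm below now delegates to a shared `sample_based_on_probability` helper: a trace is kept when the low 63 bits derived from its trace id fall under `prob * 2^63`. A standalone sketch of that check, with a plain `u64` standing in for the low half of the trace id:

// Map the low 64 bits of a trace id to a 63-bit value and compare it against
// the probability threshold, mirroring the helper factored out below.
fn sample(prob: f64, trace_id_low: u64) -> bool {
    if prob >= 1.0 {
        return true;
    }
    let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64;
    (trace_id_low >> 1) < prob_upper_bound
}

fn main() {
    assert!(sample(1.0, u64::MAX)); // ratio 1.0 keeps everything
    assert!(!sample(0.0, 0));       // ratio 0.0 drops everything
    assert!(sample(0.5, 1));        // small id values fall under the 2^62 cutoff
    assert!(!sample(0.5, u64::MAX));
}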
- Sampler::TraceIdRatioBased(prob) => { - if *prob >= 1.0 { - SamplingDecision::RecordAndSample - } else { - let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64; - // TODO: update behavior when the spec definition resolves - // https://github.com/open-telemetry/opentelemetry-specification/issues/1413 - let bytes = trace_id.to_bytes(); - let (_, low) = bytes.split_at(8); - let trace_id_low = u64::from_be_bytes(low.try_into().unwrap()); - let rnd_from_trace_id = trace_id_low >> 1; - - if rnd_from_trace_id < prob_upper_bound { - SamplingDecision::RecordAndSample - } else { - SamplingDecision::Drop - } - } + Sampler::TraceIdRatioBased(prob) => sample_based_on_probability(prob, trace_id), + #[cfg(feature = "jaeger_remote_sampler")] + Sampler::JaegerRemote(remote_sampler) => { + remote_sampler + .should_sample( + parent_context, + trace_id, + name, + span_kind, + attributes, + links, + instrumentation_library, + ) + .decision } }; - SamplingResult { decision, // No extra attributes ever set by the SDK samplers. @@ -155,6 +199,26 @@ impl ShouldSample for Sampler { } } +pub(crate) fn sample_based_on_probability(prob: &f64, trace_id: TraceId) -> SamplingDecision { + if *prob >= 1.0 { + SamplingDecision::RecordAndSample + } else { + let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64; + // TODO: update behavior when the spec definition resolves + // https://github.com/open-telemetry/opentelemetry-specification/issues/1413 + let bytes = trace_id.to_bytes(); + let (_, low) = bytes.split_at(8); + let trace_id_low = u64::from_be_bytes(low.try_into().unwrap()); + let rnd_from_trace_id = trace_id_low >> 1; + + if rnd_from_trace_id < prob_upper_bound { + SamplingDecision::RecordAndSample + } else { + SamplingDecision::Drop + } + } +} + #[cfg(all(test, feature = "testing", feature = "trace"))] mod tests { use super::*; @@ -242,7 +306,7 @@ mod tests { trace_id, name, &SpanKind::Internal, - &[], + &Default::default(), &[], &InstrumentationLibrary::default(), ) @@ -284,7 +348,7 @@ mod tests { TraceId::from_u128(1), "should sample", &SpanKind::Internal, - &[], + &Default::default(), &[], &instrumentation_library, ); diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/mod.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/mod.rs new file mode 100644 index 0000000000..c69817b88f --- /dev/null +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/mod.rs @@ -0,0 +1,10 @@ +mod rate_limit; +mod remote; +mod sampler; +mod sampling_strategy; + +pub(crate) use sampler::JaegerRemoteSampler; +pub use sampler::JaegerRemoteSamplerBuilder; + +#[cfg(test)] +mod tests {} diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs new file mode 100644 index 0000000000..db3990cec9 --- /dev/null +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs @@ -0,0 +1,108 @@ +use opentelemetry_api::trace::TraceError; +use std::time::SystemTime; + +// leaky bucket based rate limit +// should be Send+Sync +pub(crate) struct LeakyBucket { + span_per_sec: f64, + available: f64, + bucket_size: f64, + last_time: SystemTime, +} + +impl LeakyBucket { + pub(crate) fn new(bucket_size: f64, span_per_sec: f64) -> LeakyBucket { + LeakyBucket { + span_per_sec, + available: bucket_size, + bucket_size, + last_time: opentelemetry_api::time::now(), + } + } + + pub(crate) fn update(&mut self, span_per_sec: f64) { + self.span_per_sec = span_per_sec; + } + + pub(crate) fn should_sample(&mut self) -> bool { + 
self.check_availability(opentelemetry_api::time::now) + } + + fn check_availability(&mut self, now: F) -> bool + where + F: Fn() -> SystemTime, + { + if self.available >= 1.0 { + self.available -= 1.0; + true + } else { + let cur_time = now(); + let elapsed = cur_time.duration_since(self.last_time); + match elapsed { + Ok(dur) => { + self.last_time = cur_time; + self.available = f64::min( + dur.as_secs() as f64 * self.span_per_sec + self.available, + self.bucket_size, + ); + + if self.available >= 1.0 { + self.available -= 1.0; + true + } else { + false + } + } + Err(_) => { + opentelemetry_api::global::handle_error(TraceError::Other( + "jaeger remote sampler gets rewinded timestamp".into(), + )); + true + } + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::trace::sampler::jaeger_remote::rate_limit::LeakyBucket; + use std::ops::{Add, Sub}; + use std::time::{Duration, SystemTime}; + + #[test] + fn test_leaky_bucket() { + // maximum bucket size 2, add 1 allowance every 10 seconds + let mut leaky_bucket = LeakyBucket::new(2.0, 0.1); + let current_time = SystemTime::now(); + leaky_bucket.last_time = current_time; + + let test_cases = vec![ + (0, vec![true, true, false]), + (1, vec![false]), + (5, vec![false]), + (10, vec![true, false]), + (60, vec![true, true, false]), // maximum allowance is 2 + ]; + + for (elapsed_sec, cases) in test_cases.into_iter() { + for should_pass in cases { + assert_eq!( + should_pass, + leaky_bucket.check_availability(|| { + current_time.add(Duration::from_secs(elapsed_sec)) + }) + ) + } + } + } + + #[test] + fn test_rewind_clock_should_pass() { + let mut leaky_bucket = LeakyBucket::new(2.0, 0.1); + let current_time = SystemTime::now(); + leaky_bucket.last_time = current_time; + + assert!(leaky_bucket.check_availability(|| { current_time.sub(Duration::from_secs(10)) })) + } +} diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs new file mode 100644 index 0000000000..3e2aa2d3d1 --- /dev/null +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs @@ -0,0 +1,87 @@ +/// Generate types based on proto + +/// ProbabilisticSamplingStrategy samples traces with a fixed probability. +#[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct ProbabilisticSamplingStrategy { + /// samplingRate is the sampling probability in the range [0.0, 1.0]. + pub(crate) sampling_rate: f64, +} + +/// RateLimitingSamplingStrategy samples a fixed number of traces per time interval. +/// The typical implementations use the leaky bucket algorithm. +#[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RateLimitingSamplingStrategy { + /// TODO this field type should be changed to double, to support rates like 1 per minute. + pub(crate) max_traces_per_second: i32, +} + +/// OperationSamplingStrategy is a sampling strategy for a given operation +/// (aka endpoint, span name). Only probabilistic sampling is currently supported. +#[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OperationSamplingStrategy { + pub(crate) operation: String, + pub(crate) probabilistic_sampling: ProbabilisticSamplingStrategy, +} + +/// PerOperationSamplingStrategies is a combination of strategies for different endpoints +/// as well as some service-wide defaults. 
It is particularly useful for services whose /// endpoints receive vastly different traffic, so that any single rate of sampling would /// result in either too much data for some endpoints or almost no data for other endpoints. +#[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct PerOperationSamplingStrategies { + /// defaultSamplingProbability is the sampling probability for spans that do not match + /// any of the perOperationStrategies. + pub(crate) default_sampling_probability: f64, + /// defaultLowerBoundTracesPerSecond defines a lower-bound rate limit used to ensure that + /// there is some minimal amount of traces sampled for an endpoint that might otherwise + /// never be sampled via probabilistic strategies. The limit is local to a service instance, + /// so if a service is deployed with many (N) instances, the effective minimum rate of sampling + /// will be N times higher. This setting applies to ALL operations, whether or not they match + /// one of the perOperationStrategies. + pub(crate) default_lower_bound_traces_per_second: f64, + /// perOperationStrategies describes sampling strategies for individual operations within + /// a given service. + pub(crate) per_operation_strategies: Vec<OperationSamplingStrategy>, + /// defaultUpperBoundTracesPerSecond defines an upper bound rate limit. + /// However, almost no Jaeger SDKs support this parameter. + pub(crate) default_upper_bound_traces_per_second: f64, +} + +/// SamplingStrategyResponse contains an overall sampling strategy for a given service. +/// This type should be treated as a union where only one of the strategy fields is present. +#[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct SamplingStrategyResponse { + /// Legacy field that was meant to indicate which one of the strategy fields + /// below is present. This enum was not extended when the per-operation strategy + /// was introduced, because extending an enum has backwards compatibility issues. + /// The recommended approach for consumers is to ignore this field and instead + /// check which of the other fields is set (starting with operationSampling). + /// For producers, it is recommended to set this field correctly for probabilistic + /// and rate-limiting strategies, but if a per-operation strategy is returned, + /// the enum can be set to 0 (probabilistic). + pub(crate) strategy_type: SamplingStrategyType, + pub(crate) probabilistic_sampling: Option<ProbabilisticSamplingStrategy>, + pub(crate) rate_limiting_sampling: Option<RateLimitingSamplingStrategy>, + pub(crate) operation_sampling: Option<PerOperationSamplingStrategies>, +} + +/// SamplingStrategyParameters defines request parameters for the remote sampler. +#[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct SamplingStrategyParameters { + /// serviceName is a required argument. + pub(crate) service_name: String, +} + +/// See the description of the SamplingStrategyResponse.strategyType field.
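Thanks to the camelCase renaming, a strategy response from the sampling endpoint deserializes straight into these types. A self-contained sketch using simplified public mirrors of the crate-private types above (strategyType kept as a plain `String` here, and assuming serde's derive feature plus serde_json):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ProbabilisticSampling {
    sampling_rate: f64,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct StrategyResponse {
    strategy_type: String,
    probabilistic_sampling: Option<ProbabilisticSampling>,
}

fn main() {
    // The shape a Jaeger agent/collector returns for a probabilistic strategy.
    let body = r#"{"strategyType":"PROBABILISTIC","probabilisticSampling":{"samplingRate":0.25}}"#;
    let resp: StrategyResponse = serde_json::from_str(body).expect("valid response");
    assert_eq!(resp.strategy_type, "PROBABILISTIC");
    assert_eq!(resp.probabilistic_sampling.map(|p| p.sampling_rate), Some(0.25));
}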
+#[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] +#[serde(rename_all = "UPPERCASE")] +pub(crate) enum SamplingStrategyType { + Probabilistic, + RateLimiting, +} diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs new file mode 100644 index 0000000000..6d816f08f3 --- /dev/null +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs @@ -0,0 +1,258 @@ +use crate::trace::sampler::jaeger_remote::remote::SamplingStrategyResponse; +use crate::trace::sampler::jaeger_remote::sampling_strategy::Inner; +use crate::trace::{Sampler, ShouldSample, TraceRuntime}; +use futures_util::{stream, StreamExt as _}; +use http::Uri; +use opentelemetry_api::trace::{Link, OrderMap, SamplingResult, SpanKind, TraceError, TraceId}; +use opentelemetry_api::{global, Context, InstrumentationLibrary, Key, Value}; +use opentelemetry_http::HttpClient; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +const DEFAULT_REMOTE_SAMPLER_ENDPOINT: &str = "http://localhost:5778/sampling"; + +/// Builder of JaegerRemoteSampler. +/// See [Sampler::jaeger_remote] for details. +#[derive(Debug)] +pub struct JaegerRemoteSamplerBuilder +where + R: TraceRuntime, + C: HttpClient + 'static, + S: ShouldSample + 'static, +{ + pub(crate) update_interval: Duration, + pub(crate) client: C, + pub(crate) endpoint: String, + pub(crate) default_sampler: S, + pub(crate) leaky_bucket_size: f64, + pub(crate) runtime: R, + pub(crate) service_name: String, +} + +impl JaegerRemoteSamplerBuilder +where + C: HttpClient + 'static, + S: ShouldSample + 'static, + R: TraceRuntime, +{ + pub(crate) fn new( + runtime: R, + http_client: C, + default_sampler: S, + service_name: Svc, + ) -> Self + where + Svc: Into<String>, + { + JaegerRemoteSamplerBuilder { + runtime, + update_interval: Duration::from_secs(60 * 5), + client: http_client, + endpoint: DEFAULT_REMOTE_SAMPLER_ENDPOINT.to_string(), + default_sampler, + leaky_bucket_size: 100.0, + service_name: service_name.into(), + } + } + + /// Change how often the SDK should fetch the sampling strategy from remote servers. + /// + /// By default it fetches every 5 minutes. + /// + /// A shorter interval has a performance overhead and should be avoided. + pub fn with_update_interval(self, interval: Duration) -> Self { + Self { + update_interval: interval, + ..self + } + } + + /// The endpoint of remote servers. + /// + /// By default it's `http://localhost:5778/sampling`. + /// + /// If the service name is provided as part of the + pub fn with_endpoint<Str: Into<String>>(self, endpoint: Str) -> Self { + Self { + endpoint: endpoint.into(), + ..self + } + } + + /// The size of the leaky bucket. + /// + /// It's used when the sampling strategy is rate limiting. + pub fn with_leaky_bucket_size(self, size: f64) -> Self { + Self { + leaky_bucket_size: size, + ..self + } + } + + /// Build a jaeger remote sampler.
+ /// + /// Returns an error when the endpoint provided is invalid (e.g., the service name is empty). + pub fn build(self) -> Result<Sampler, TraceError> { + let endpoint = Self::get_endpoint(&self.endpoint, &self.service_name) + .map_err(|err_str| TraceError::Other(err_str.into()))?; + + Ok(Sampler::JaegerRemote(JaegerRemoteSampler::new( + self.runtime, + self.update_interval, + self.client, + endpoint, + self.default_sampler, + self.leaky_bucket_size, + ))) + } + + fn get_endpoint(endpoint: &str, service_name: &str) -> Result<Uri, String> { + if endpoint.is_empty() || service_name.is_empty() { + return Err("endpoint and service name cannot be empty".to_string()); + } + let mut endpoint = url::Url::parse(endpoint) + .unwrap_or_else(|_| url::Url::parse(DEFAULT_REMOTE_SAMPLER_ENDPOINT).unwrap()); + endpoint + .query_pairs_mut() + .append_pair("service", service_name); + + Uri::from_str(endpoint.as_str()).map_err(|_err| "invalid service name".to_string()) + } +} + +/// Sampler that fetches the sampling configuration from remotes. +/// +/// Note that the backend doesn't need to be Jaeger so long as it supports the jaeger remote sampling +/// protocol. +#[derive(Clone, Debug)] +pub struct JaegerRemoteSampler { + inner: Arc<Inner>, + default_sampler: Arc<dyn ShouldSample>, +} + +impl JaegerRemoteSampler { + fn new( + runtime: R, + update_timeout: Duration, + client: C, + endpoint: Uri, + default_sampler: S, + leaky_bucket_size: f64, + ) -> Self + where + R: TraceRuntime, + C: HttpClient + 'static, + S: ShouldSample + 'static, + { + let (shutdown_tx, shutdown_rx) = futures_channel::mpsc::channel(1); + let inner = Arc::new(Inner::new(leaky_bucket_size, shutdown_tx)); + let sampler = JaegerRemoteSampler { + inner, + default_sampler: Arc::new(default_sampler), + }; + Self::run_update_task( + runtime, + sampler.inner.clone(), + update_timeout, + client, + shutdown_rx, + endpoint, + ); + sampler + } + + // start an updating task + fn run_update_task( + runtime: R, + strategy: Arc<Inner>, + update_timeout: Duration, + client: C, + shutdown: futures_channel::mpsc::Receiver<()>, + endpoint: Uri, + ) where + R: TraceRuntime, + C: HttpClient + 'static, + { + // todo: review if we need 'static here + let interval = runtime.interval(update_timeout); + runtime.spawn(Box::pin(async move { + // either update or shutdown + let mut update = Box::pin(stream::select( + shutdown.map(|_| false), + interval.map(|_| true), + )); + + while let Some(should_update) = update.next().await { + if should_update { + // poll next available configuration or shutdown + // send request + match Self::request_new_strategy(&client, endpoint.clone()).await { + Ok(remote_strategy_resp) => strategy.update(remote_strategy_resp), + Err(err_msg) => global::handle_error(TraceError::Other(err_msg.into())), + }; + } else { + // shutdown + break; + } + } + })); + } + + async fn request_new_strategy( + client: &C, + endpoint: Uri, + ) -> Result<SamplingStrategyResponse, String> + where + C: HttpClient, + { + let request = http::Request::get(endpoint) + .header("Content-Type", "application/json") + .body(Vec::new()) + .unwrap(); + + let resp = client + .send(request) + .await + .map_err(|err| format!("failed to send the request: {}", err))?; + + // process failures + if resp.status() != http::StatusCode::OK { + return Err(format!( + "the http response code is not 200 but {}", + resp.status() + )); + } + + // deserialize the response + serde_json::from_slice(&resp.body()[..]) + .map_err(|err| format!("cannot deserialize the response, {}", err)) + } +} + +impl ShouldSample for JaegerRemoteSampler { + fn should_sample( + &self, + parent_context:
+        trace_id: TraceId,
+        name: &str,
+        span_kind: &SpanKind,
+        attributes: &OrderMap<Key, Value>,
+        links: &[Link],
+        instrumentation_library: &InstrumentationLibrary,
+    ) -> SamplingResult {
+        self.inner
+            .should_sample(parent_context, trace_id, name)
+            .unwrap_or_else(|| {
+                self.default_sampler.should_sample(
+                    parent_context,
+                    trace_id,
+                    name,
+                    span_kind,
+                    attributes,
+                    links,
+                    instrumentation_library,
+                )
+            })
+    }
+}
diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs
new file mode 100644
index 0000000000..789f850eb8
--- /dev/null
+++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs
@@ -0,0 +1,226 @@
+use crate::trace::sampler::jaeger_remote::remote::{
+    PerOperationSamplingStrategies, ProbabilisticSamplingStrategy, RateLimitingSamplingStrategy,
+    SamplingStrategyResponse,
+};
+use crate::trace::sampler::sample_based_on_probability;
+use opentelemetry_api::trace::{
+    SamplingDecision, SamplingResult, TraceContextExt, TraceError, TraceId, TraceState,
+};
+use opentelemetry_api::{global, Context};
+use std::collections::HashMap;
+use std::fmt::{Debug, Formatter};
+use std::sync::Mutex;
+
+use super::rate_limit::LeakyBucket;
+
+// todo: remove the mutex as probabilistic doesn't require a mutable ref
+// sampling strategy sent by remote agents or collectors.
+enum Strategy {
+    // probability to sample between [0.0, 1.0]
+    Probabilistic(f64),
+    // max traces per second
+    RateLimiting(LeakyBucket),
+    PerOperation(PerOperationStrategies),
+}
+
+pub(crate) struct Inner {
+    strategy: Mutex<Option<Strategy>>,
+    // initial configuration for leaky bucket
+    leaky_bucket_size: f64,
+    shut_down: futures_channel::mpsc::Sender<()>,
+}
+
+impl Debug for Inner {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        // todo: add more debug information
+        f.debug_struct("JaegerRemoteSamplerInner")
+            .field("leaky_bucket_size", &self.leaky_bucket_size)
+            .finish()
+    }
+}
+
+impl Drop for Inner {
+    fn drop(&mut self) {
+        let _ = self.shut_down.try_send(());
+    }
+}
+
+impl Inner {
+    pub(crate) fn new(
+        leaky_bucket_size: f64,
+        shut_down: futures_channel::mpsc::Sender<()>,
+    ) -> Self {
+        Inner {
+            strategy: Mutex::new(None),
+            leaky_bucket_size,
+            shut_down,
+        }
+    }
+
+    pub(crate) fn update(&self, remote_strategy_resp: SamplingStrategyResponse) {
+        self.strategy
+            .lock()
+            .map(|mut old_strategy_opt| {
+                *old_strategy_opt = match old_strategy_opt.take() {
+                    Some(mut old_strategy) => {
+                        // update the sampling strategy
+                        // the response should be a union type where
+                        // - operation_sampling
+                        // - rate_limiting_sampling
+                        // - probabilistic_sampling
+                        // are mutually exclusive.
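+                        // (Editorial illustration, not part of this change) A
+                        // remote response typically sets exactly one of these
+                        // fields, e.g.
+                        //   {"probabilisticSampling": {"samplingRate": 0.25}}
+                        // or
+                        //   {"rateLimitingSampling": {"maxTracesPerSecond": 100}},
+                        // so the match below keys on which field is `Some`.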
+                        match (
+                            remote_strategy_resp.operation_sampling,
+                            remote_strategy_resp.rate_limiting_sampling,
+                            remote_strategy_resp.probabilistic_sampling,
+                            &mut old_strategy,
+                        ) {
+                            (
+                                None,
+                                Some(rate_limiting),
+                                None,
+                                Strategy::RateLimiting(leaky_bucket),
+                            ) => {
+                                leaky_bucket.update(rate_limiting.max_traces_per_second as f64);
+                                // in the future the remote response may support f64
+                                Some(old_strategy)
+                            }
+                            // only the leaky bucket is a stateful sampler, meaning its update
+                            // differs from its initialization; for the other samplers, we can
+                            // just re-initialize them
+                            (
+                                operation_sampling,
+                                rate_limiting_sampling,
+                                probabilistic_sampling,
+                                _,
+                            ) => self.init_strategy(
+                                operation_sampling,
+                                rate_limiting_sampling,
+                                probabilistic_sampling,
+                            ),
+                        }
+                    }
+                    None => self.init_strategy(
+                        remote_strategy_resp.operation_sampling,
+                        remote_strategy_resp.rate_limiting_sampling,
+                        remote_strategy_resp.probabilistic_sampling,
+                    ),
+                }
+            })
+            .unwrap_or_else(|_err| {
+                global::handle_error(TraceError::Other(
+                    "jaeger remote sampler mutex poisoned".into(),
+                ))
+            });
+    }
+
+    fn init_strategy(
+        &self,
+        operation_sampling: Option<PerOperationSamplingStrategies>,
+        rate_limiting_sampling: Option<RateLimitingSamplingStrategy>,
+        probabilistic_sampling: Option<ProbabilisticSamplingStrategy>,
+    ) -> Option<Strategy> {
+        match (
+            operation_sampling,
+            rate_limiting_sampling,
+            probabilistic_sampling,
+        ) {
+            (Some(op_sampling), _, _) => {
+                // ops sampling
+                let mut per_ops_sampling = PerOperationStrategies::default();
+                per_ops_sampling.update(op_sampling);
+                Some(Strategy::PerOperation(per_ops_sampling))
+            }
+            (_, Some(rate_limiting), _) => Some(Strategy::RateLimiting(LeakyBucket::new(
+                self.leaky_bucket_size,
+                rate_limiting.max_traces_per_second as f64,
+            ))),
+            (_, _, Some(probabilistic)) => {
+                Some(Strategy::Probabilistic(probabilistic.sampling_rate))
+            }
+            _ => None,
+        }
+    }
+
+    pub(crate) fn should_sample(
+        &self,
+        parent_context: Option<&Context>,
+        trace_id: TraceId,
+        name: &str,
+    ) -> Option<SamplingResult> {
+        self.strategy
+            .lock()
+            .map(|mut inner_opt| match inner_opt.as_mut() {
+                Some(inner) => {
+                    let decision = match inner {
+                        Strategy::RateLimiting(leaky_bucket) => {
+                            if leaky_bucket.should_sample() {
+                                SamplingDecision::RecordAndSample
+                            } else {
+                                SamplingDecision::Drop
+                            }
+                        }
+                        Strategy::Probabilistic(prob) => {
+                            sample_based_on_probability(prob, trace_id)
+                        }
+                        Strategy::PerOperation(per_operation_strategies) => {
+                            sample_based_on_probability(
+                                &per_operation_strategies.get_probability(name),
+                                trace_id,
+                            )
+                        }
+                    };
+
+                    Some(SamplingResult {
+                        decision,
+                        attributes: Vec::new(),
+                        trace_state: match parent_context {
+                            Some(ctx) => ctx.span().span_context().trace_state().clone(),
+                            None => TraceState::default(),
+                        },
+                    })
+                }
+                None => None,
+            })
+            .unwrap_or_else(|_| None)
+    }
+}
+
+#[derive(Default)]
+pub(crate) struct PerOperationStrategies {
+    default_prob: f64,
+    default_lower_bound_traces_per_second: f64,
+    operation_prob: HashMap<String, f64>,
+    // todo: guarantee the throughput using the lower and upper bounds
+    default_upper_bound_traces_per_second: f64,
+}
+
+impl PerOperationStrategies {
+    pub(crate) fn update(&mut self, remote_strategies: PerOperationSamplingStrategies) {
+        self.default_prob = remote_strategies.default_sampling_probability as f64;
+        self.default_lower_bound_traces_per_second =
+            remote_strategies.default_lower_bound_traces_per_second as f64;
+        self.default_upper_bound_traces_per_second =
+            remote_strategies.default_upper_bound_traces_per_second as f64;
+
+        self.operation_prob = remote_strategies
+            .per_operation_strategies
+            .into_iter()
+            .map(|op_strategy| {
+                (
op_strategy.operation, + op_strategy.probabilistic_sampling.sampling_rate, + ) + }) + .collect(); + } + + pub(crate) fn get_probability(&self, operation: &str) -> f64 { + *self + .operation_prob + .get(operation) + .unwrap_or(&self.default_prob) + } +} + +#[cfg(test)] +mod tests {} diff --git a/opentelemetry-sdk/src/trace/span.rs b/opentelemetry-sdk/src/trace/span.rs index 03383c9322..f9c942981a 100644 --- a/opentelemetry-sdk/src/trace/span.rs +++ b/opentelemetry-sdk/src/trace/span.rs @@ -9,10 +9,10 @@ //! is possible to change its name, set its `Attributes`, and add `Links` and `Events`. //! These cannot be changed after the `Span`'s end time has been set. use crate::trace::SpanLimits; +use crate::Resource; use opentelemetry_api::trace::{Event, SpanContext, SpanId, SpanKind, Status}; use opentelemetry_api::{trace, KeyValue}; use std::borrow::Cow; -use std::sync::Arc; use std::time::SystemTime; /// Single operation within a trace. @@ -74,11 +74,8 @@ impl Span { /// overhead. pub fn exported_data(&self) -> Option { let (span_context, tracer) = (self.span_context.clone(), &self.tracer); - let resource = if let Some(provider) = self.tracer.provider() { - provider.config().resource.clone() - } else { - None - }; + let resource = self.tracer.provider()?.config().resource.clone(); + self.data .as_ref() .map(|data| build_export_data(data.clone(), span_context, resource, tracer)) @@ -221,7 +218,7 @@ impl Drop for Span { fn build_export_data( data: SpanData, span_context: SpanContext, - resource: Option>, + resource: Cow<'static, Resource>, tracer: &crate::trace::Tracer, ) -> crate::export::trace::SpanData { crate::export::trace::SpanData { diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 001dc763f9..8436181d04 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -38,8 +38,12 @@ use crate::export::trace::{ExportResult, SpanData, SpanExporter}; use crate::trace::runtime::{TraceRuntime, TrySend}; use crate::trace::Span; use futures_channel::oneshot; -use futures_util::future::{self, Either}; -use futures_util::{pin_mut, stream, StreamExt as _}; +use futures_util::{ + future::{self, BoxFuture, Either}, + select, + stream::{self, FusedStream, FuturesUnordered}, + Stream, StreamExt as _, +}; use opentelemetry_api::global; use opentelemetry_api::{ trace::{TraceError, TraceResult}, @@ -63,6 +67,11 @@ const OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT: usize = 512; const OTEL_BSP_EXPORT_TIMEOUT: &str = "OTEL_BSP_EXPORT_TIMEOUT"; /// Default maximum allowed time to export data. const OTEL_BSP_EXPORT_TIMEOUT_DEFAULT: u64 = 30_000; +/// Environment variable to configure max concurrent exports for batch span +/// processor. +const OTEL_BSP_MAX_CONCURRENT_EXPORTS: &str = "OTEL_BSP_MAX_CONCURRENT_EXPORTS"; +/// Default max concurrent exports for BSP +const OTEL_BSP_MAX_CONCURRENT_EXPORTS_DEFAULT: usize = 1; /// `SpanProcessor` is an interface which allows hooks for span start and end /// method invocations. 
The span processors are invoked only when is_recording @@ -277,12 +286,153 @@ pub enum BatchMessage { Shutdown(oneshot::Sender), } +struct BatchSpanProcessorInternal { + spans: Vec, + export_tasks: FuturesUnordered>, + runtime: R, + exporter: Box, + config: BatchConfig, +} + +impl BatchSpanProcessorInternal { + async fn flush(&mut self, res_channel: Option>) { + let export_task = self.export(); + let task = Box::pin(async move { + let result = export_task.await; + + if let Some(channel) = res_channel { + if let Err(result) = channel.send(result) { + global::handle_error(TraceError::from(format!( + "failed to send flush result: {:?}", + result + ))); + } + } else if let Err(err) = result { + global::handle_error(err); + } + + Ok(()) + }); + + if self.config.max_concurrent_exports == 1 { + let _ = task.await; + } else { + self.export_tasks.push(task); + while self.export_tasks.next().await.is_some() {} + } + } + + /// Process a single message + /// + /// A return value of false indicates shutdown + async fn process_message(&mut self, message: BatchMessage) -> bool { + match message { + // Span has finished, add to buffer of pending spans. + BatchMessage::ExportSpan(span) => { + self.spans.push(span); + + if self.spans.len() == self.config.max_export_batch_size { + // If concurrent exports are saturated, wait for one to complete. + if !self.export_tasks.is_empty() + && self.export_tasks.len() == self.config.max_concurrent_exports + { + self.export_tasks.next().await; + } + + let export_task = self.export(); + let task = async move { + if let Err(err) = export_task.await { + global::handle_error(err); + } + + Ok(()) + }; + // Special case when not using concurrent exports + if self.config.max_concurrent_exports == 1 { + let _ = task.await; + } else { + self.export_tasks.push(Box::pin(task)); + } + } + } + // Span batch interval time reached or a force flush has been invoked, export + // current spans. + // + // This is a hint to ensure that any tasks associated with Spans for which the + // SpanProcessor had already received events prior to the call to ForceFlush + // SHOULD be completed as soon as possible, preferably before returning from + // this method. + // + // In particular, if any SpanProcessor has any associated exporter, it SHOULD + // try to call the exporter's Export with all spans for which this was not + // already done and then invoke ForceFlush on it. The built-in SpanProcessors + // MUST do so. If a timeout is specified (see below), the SpanProcessor MUST + // prioritize honoring the timeout over finishing all calls. It MAY skip or + // abort some or all Export or ForceFlush calls it has made to achieve this + // goal. + // + // NB: `force_flush` is not currently implemented on exporters; the equivalent + // would be waiting for exporter tasks to complete. In the case of + // channel-coupled exporters, they will need a `force_flush` implementation to + // properly block. + BatchMessage::Flush(res_channel) => { + self.flush(res_channel).await; + } + // Stream has terminated or processor is shutdown, return to finish execution. + BatchMessage::Shutdown(ch) => { + self.flush(Some(ch)).await; + self.exporter.shutdown(); + return false; + } + } + + true + } + + fn export(&mut self) -> BoxFuture<'static, ExportResult> { + // Batch size check for flush / shutdown. Those methods may be called + // when there's no work to do. 
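+        // (Editorial note) `split_off(0)` below drains the pending buffer, and
+        // the export future is raced against `runtime.delay(..)` via
+        // `future::select`, so a stalled exporter cannot block the processor
+        // past `max_export_timeout`.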
+ if self.spans.is_empty() { + return Box::pin(future::ready(Ok(()))); + } + + let export = self.exporter.export(self.spans.split_off(0)); + let timeout = self.runtime.delay(self.config.max_export_timeout); + let time_out = self.config.max_export_timeout; + + Box::pin(async move { + match future::select(export, timeout).await { + Either::Left((export_res, _)) => export_res, + Either::Right((_, _)) => ExportResult::Err(TraceError::ExportTimedOut(time_out)), + } + }) + } + + async fn run(mut self, mut messages: impl Stream + Unpin + FusedStream) { + loop { + select! { + // FuturesUnordered implements Fuse intelligently such that it + // will become eligible again once new tasks are added to it. + _ = self.export_tasks.next() => { + // An export task completed; do we need to do anything with it? + }, + message = messages.next() => { + match message { + Some(message) => { + if !self.process_message(message).await { + break; + } + }, + None => break, + } + }, + } + } + } +} + impl BatchSpanProcessor { - pub(crate) fn new( - mut exporter: Box, - config: BatchConfig, - runtime: R, - ) -> Self { + pub(crate) fn new(exporter: Box, config: BatchConfig, runtime: R) -> Self { let (message_sender, message_receiver) = runtime.batch_message_channel(config.max_queue_size); let ticker = runtime @@ -290,76 +440,17 @@ impl BatchSpanProcessor { .map(|_| BatchMessage::Flush(None)); let timeout_runtime = runtime.clone(); - // Spawn worker process via user-defined spawn function. - runtime.spawn(Box::pin(async move { - let mut spans = Vec::new(); - let mut messages = Box::pin(stream::select(message_receiver, ticker)); - - while let Some(message) = messages.next().await { - match message { - // Span has finished, add to buffer of pending spans. - BatchMessage::ExportSpan(span) => { - spans.push(span); - - if spans.len() == config.max_export_batch_size { - let result = export_with_timeout( - config.max_export_timeout, - exporter.as_mut(), - &timeout_runtime, - spans.split_off(0), - ) - .await; - - if let Err(err) = result { - global::handle_error(err); - } - } - } - // Span batch interval time reached or a force flush has been invoked, export current spans. - BatchMessage::Flush(res_channel) => { - let result = export_with_timeout( - config.max_export_timeout, - exporter.as_mut(), - &timeout_runtime, - spans.split_off(0), - ) - .await; - - if let Some(channel) = res_channel { - if let Err(result) = channel.send(result) { - global::handle_error(TraceError::from(format!( - "failed to send flush result: {:?}", - result - ))); - } - } else if let Err(err) = result { - global::handle_error(err); - } - } - // Stream has terminated or processor is shutdown, return to finish execution. - BatchMessage::Shutdown(ch) => { - let result = export_with_timeout( - config.max_export_timeout, - exporter.as_mut(), - &timeout_runtime, - spans.split_off(0), - ) - .await; - - exporter.shutdown(); - - if let Err(result) = ch.send(result) { - global::handle_error(TraceError::from(format!( - "failed to send batch processor shutdown result: {:?}", - result - ))); - } + let messages = Box::pin(stream::select(message_receiver, ticker)); + let processor = BatchSpanProcessorInternal { + spans: Vec::new(), + export_tasks: FuturesUnordered::new(), + runtime: timeout_runtime, + config, + exporter, + }; - break; - } - } - } - })); + // Spawn worker process via user-defined spawn function. 
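+        // (Editorial note) `run` multiplexes two event sources with `select!`:
+        // the merged message/ticker stream built above and the
+        // `FuturesUnordered` set of in-flight export tasks, which becomes
+        // pollable again whenever new tasks are pushed into it.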
+ runtime.spawn(Box::pin(processor.run(messages))); // Return batch processor with link to worker BatchSpanProcessor { message_sender } @@ -378,30 +469,6 @@ impl BatchSpanProcessor { } } -async fn export_with_timeout( - time_out: Duration, - exporter: &mut E, - runtime: &R, - batch: Vec, -) -> ExportResult -where - R: TraceRuntime, - E: SpanExporter + ?Sized, -{ - if batch.is_empty() { - return Ok(()); - } - - let export = exporter.export(batch); - let timeout = runtime.delay(time_out); - pin_mut!(export); - pin_mut!(timeout); - match future::select(export, timeout).await { - Either::Left((export_res, _)) => export_res, - Either::Right((_, _)) => ExportResult::Err(TraceError::ExportTimedOut(time_out)), - } -} - /// Batch span processor configuration #[derive(Debug)] pub struct BatchConfig { @@ -421,6 +488,13 @@ pub struct BatchConfig { /// The maximum duration to export a batch of data. max_export_timeout: Duration, + + /// Maximum number of concurrent exports + /// + /// Limits the number of spawned tasks for exports and thus memory consumed + /// by an exporter. A value of 1 will cause exports to be performed + /// synchronously on the BatchSpanProcessor task. + max_concurrent_exports: usize, } impl Default for BatchConfig { @@ -430,8 +504,16 @@ impl Default for BatchConfig { scheduled_delay: Duration::from_millis(OTEL_BSP_SCHEDULE_DELAY_DEFAULT), max_export_batch_size: OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT, max_export_timeout: Duration::from_millis(OTEL_BSP_EXPORT_TIMEOUT_DEFAULT), + max_concurrent_exports: OTEL_BSP_MAX_CONCURRENT_EXPORTS_DEFAULT, }; + if let Some(max_concurrent_exports) = env::var(OTEL_BSP_MAX_CONCURRENT_EXPORTS) + .ok() + .and_then(|max_concurrent_exports| usize::from_str(&max_concurrent_exports).ok()) + { + config.max_concurrent_exports = max_concurrent_exports; + } + if let Some(max_queue_size) = env::var(OTEL_BSP_MAX_QUEUE_SIZE) .ok() .and_then(|queue_size| usize::from_str(&queue_size).ok()) @@ -524,6 +606,16 @@ where BatchSpanProcessorBuilder { config, ..self } } + /// Set the maximum number of concurrent exports + /// + /// This setting may be useful for limiting network throughput or memory + /// consumption. 
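+    ///
+    /// # Example (editorial sketch)
+    ///
+    /// `exporter` and `runtime` below are placeholders for any span exporter
+    /// and trace runtime; they are not part of this change:
+    ///
+    /// ```ignore
+    /// let processor = BatchSpanProcessor::builder(exporter, runtime)
+    ///     .with_max_concurrent_exports(4) // allow up to 4 exports in flight
+    ///     .build();
+    /// ```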
+ pub fn with_max_concurrent_exports(self, max: usize) -> Self { + let mut config = self.config; + config.max_concurrent_exports = max; + BatchSpanProcessorBuilder { config, ..self } + } + /// Build a batch processor pub fn build(self) -> BatchSpanProcessor { BatchSpanProcessor::new(Box::new(self.exporter), self.config, self.runtime) @@ -652,9 +744,12 @@ mod tests { D: Fn(Duration) -> DS + 'static + Send + Sync, DS: Future + Send + Sync + 'static, { - async fn export(&mut self, _batch: Vec) -> ExportResult { - (self.delay_fn)(self.delay_for).await; - Ok(()) + fn export( + &mut self, + _batch: Vec, + ) -> futures_util::future::BoxFuture<'static, ExportResult> { + use futures_util::FutureExt; + Box::pin((self.delay_fn)(self.delay_for).map(|_| Ok(()))) } } diff --git a/opentelemetry-sdk/src/trace/tracer.rs b/opentelemetry-sdk/src/trace/tracer.rs index ace67a34e9..2852d621d7 100644 --- a/opentelemetry-sdk/src/trace/tracer.rs +++ b/opentelemetry-sdk/src/trace/tracer.rs @@ -17,10 +17,10 @@ use crate::{ InstrumentationLibrary, }; use opentelemetry_api::trace::{ - Link, SamplingDecision, SamplingResult, SpanBuilder, SpanContext, SpanId, SpanKind, + Link, OrderMap, SamplingDecision, SamplingResult, SpanBuilder, SpanContext, SpanId, SpanKind, TraceContextExt, TraceFlags, TraceId, TraceState, }; -use opentelemetry_api::{Context, KeyValue}; +use opentelemetry_api::{Context, Key, KeyValue, Value}; use std::fmt; use std::sync::Weak; @@ -72,7 +72,7 @@ impl Tracer { trace_id: TraceId, name: &str, span_kind: &SpanKind, - attributes: &[KeyValue], + attributes: &OrderMap, links: &[Link], config: &Config, instrumentation_library: &InstrumentationLibrary, @@ -218,14 +218,14 @@ impl opentelemetry_api::trace::Tracer for Tracer { } = builder; // Build optional inner context, `None` if not recording. 
- let mut span = if let Some((flags, mut extra_attrs, trace_state)) = sampling_decision { - if !extra_attrs.is_empty() { - attribute_options.append(&mut extra_attrs); + let mut span = if let Some((flags, extra_attrs, trace_state)) = sampling_decision { + for extra_attr in extra_attrs { + attribute_options.insert(extra_attr.key, extra_attr.value); } let mut attributes = EvictedHashMap::new(span_limits.max_attributes_per_span, attribute_options.len()); - for attribute in attribute_options { - attributes.insert(attribute); + for (key, value) in attribute_options { + attributes.insert(KeyValue::new(key, value)); } let mut links = EvictedQueue::new(span_limits.max_links_per_span); if let Some(link_options) = &mut link_options { @@ -300,10 +300,10 @@ mod tests { }; use opentelemetry_api::{ trace::{ - Link, SamplingDecision, SamplingResult, Span, SpanContext, SpanId, SpanKind, + Link, OrderMap, SamplingDecision, SamplingResult, Span, SpanContext, SpanId, SpanKind, TraceContextExt, TraceFlags, TraceId, TraceState, Tracer, TracerProvider, }, - Context, KeyValue, + Context, Key, Value, }; #[derive(Debug)] @@ -316,7 +316,7 @@ mod tests { _trace_id: TraceId, _name: &str, _span_kind: &SpanKind, - _attributes: &[KeyValue], + _attributes: &OrderMap, _links: &[Link], _instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult { diff --git a/opentelemetry-semantic-conventions/Cargo.toml b/opentelemetry-semantic-conventions/Cargo.toml index 141a0526ef..16247f3b4b 100644 --- a/opentelemetry-semantic-conventions/Cargo.toml +++ b/opentelemetry-semantic-conventions/Cargo.toml @@ -19,7 +19,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry = { version = "0.18.0", default-features = false, path = "../opentelemetry" } +opentelemetry = { version = "0.18", default-features = false, path = "../opentelemetry" } [dev-dependencies] opentelemetry = { default-features = false, features = ["trace"], path = "../opentelemetry" } diff --git a/opentelemetry-semantic-conventions/src/trace.rs b/opentelemetry-semantic-conventions/src/trace.rs index f6371d0763..01448e16d8 100644 --- a/opentelemetry-semantic-conventions/src/trace.rs +++ b/opentelemetry-semantic-conventions/src/trace.rs @@ -14,13 +14,13 @@ //! ## Usage //! //! ```rust -//! use opentelemetry::{global, trace::Tracer as _}; +//! use opentelemetry::{global, trace::Tracer as _, trace::OrderMap}; //! use opentelemetry_semantic_conventions as semcov; //! //! let tracer = global::tracer("my-component"); //! let _span = tracer //! .span_builder("span-name") -//! .with_attributes(vec![ +//! .with_attributes([ //! semcov::trace::NET_PEER_IP.string("10.0.0.1"), //! semcov::trace::NET_PEER_PORT.i64(80), //! 
]) diff --git a/opentelemetry-stackdriver/Cargo.toml b/opentelemetry-stackdriver/Cargo.toml index c73d76b08d..2a407d3226 100644 --- a/opentelemetry-stackdriver/Cargo.toml +++ b/opentelemetry-stackdriver/Cargo.toml @@ -15,14 +15,14 @@ gcp_auth = { version = "0.7", optional = true } hex = "0.4" http = "0.2" hyper = "0.14.2" -hyper-rustls = { version = "0.22.1", optional = true } -opentelemetry = { version = "0.18.0", path = "../opentelemetry" } +hyper-rustls = { version = "0.23", optional = true } +opentelemetry = { version = "0.18", path = "../opentelemetry" } opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } -prost = "0.9" -prost-types = "0.9" +prost = "0.10.4" +prost-types = "0.10.1" thiserror = "1.0.30" -tonic = { version = "0.6.2", features = ["tls", "transport"] } -yup-oauth2 = { version = "6", optional = true } +tonic = { version = "0.7.2", features = ["tls", "transport"] } +yup-oauth2 = { version = "7.0.1", optional = true } [features] default = ["yup-authorizer", "tls-native-roots"] @@ -34,6 +34,5 @@ tls-webpki-roots = ["tonic/tls-webpki-roots"] reqwest = "0.11.9" tempfile = "3.3.0" tokio = "1" -tonic-build = "0.6.2" +tonic-build = "0.7.2" walkdir = "2.3.2" -which = "4.0.2" diff --git a/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto b/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto index d34fe7be4c..b878d60dce 100644 --- a/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto +++ b/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,6 @@ syntax = "proto3"; package google.logging.type; import "google/protobuf/duration.proto"; -import "google/api/annotations.proto"; option csharp_namespace = "Google.Cloud.Logging.Type"; option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype"; diff --git a/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto b/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto index 0762b5c93a..bed71935f9 100644 --- a/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto +++ b/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,13 +16,12 @@ syntax = "proto3"; package google.logging.type; -import "google/api/annotations.proto"; - option csharp_namespace = "Google.Cloud.Logging.Type"; option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype"; option java_multiple_files = true; option java_outer_classname = "LogSeverityProto"; option java_package = "com.google.logging.type"; +option objc_class_prefix = "GLOG"; option php_namespace = "Google\\Cloud\\Logging\\Type"; option ruby_package = "Google::Cloud::Logging::Type"; diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto b/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto index 3ad2cfbb58..9971293698 100644 --- a/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto +++ b/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,8 +24,6 @@ import "google/logging/type/log_severity.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; -import "google/api/annotations.proto"; option cc_enable_arenas = true; option csharp_namespace = "Google.Cloud.Logging.V2"; @@ -37,8 +35,6 @@ option php_namespace = "Google\\Cloud\\Logging\\V2"; option ruby_package = "Google::Cloud::Logging::V2"; // An individual entry in a log. -// -// message LogEntry { option (google.api.resource) = { type: "logging.googleapis.com/Log" @@ -62,12 +58,13 @@ message LogEntry { // // `[LOG_ID]` must be URL-encoded within `log_name`. Example: // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // // `[LOG_ID]` must be less than 512 characters long and can only include the // following characters: upper and lower case alphanumeric characters, // forward-slash, underscore, hyphen, and period. // // For backward compatibility, if `log_name` begins with a forward-slash, such - // as `/projects/...`, then the log entry is ingested as usual but the + // as `/projects/...`, then the log entry is ingested as usual, but the // forward-slash is removed. Listing the log entry will not show the leading // slash and filtering for a log name with a leading slash will never return // any results. @@ -126,7 +123,7 @@ message LogEntry { // de-duplication in the export of logs. // // If the `insert_id` is omitted when writing a log entry, the Logging API - // assigns its own unique identifier in this field. + // assigns its own unique identifier in this field. // // In queries, the `insert_id` is also used to order log entries that have // the same `log_name` and `timestamp` values. @@ -136,8 +133,20 @@ message LogEntry { // applicable. google.logging.type.HttpRequest http_request = 7 [(google.api.field_behavior) = OPTIONAL]; - // Optional. A set of user-defined (key, value) data that provides additional - // information about the log entry. + // Optional. A map of key, value pairs that provides additional information about the + // log entry. The labels can be user-defined or system-defined. + // + // User-defined labels are arbitrary key, value pairs that you can use to + // classify logs. + // + // System-defined labels are defined by GCP services for platform logs. + // They have two components - a service namespace component and the + // attribute name. 
For example: `compute.googleapis.com/resource_name`. + // + // Cloud Logging truncates label keys that exceed 512 B and label + // values that exceed 64 KB upon their associated log entry being + // written. The truncation is indicated by an ellipsis at the + // end of the character string. map labels = 11 [(google.api.field_behavior) = OPTIONAL]; // Optional. Information about an operation associated with the log entry, if @@ -168,6 +177,10 @@ message LogEntry { // Optional. Source code location information associated with the log entry, if any. LogEntrySourceLocation source_location = 23 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Information indicating this LogEntry is part of a sequence of multiple log + // entries split from a single LogEntry. + LogSplit split = 35 [(google.api.field_behavior) = OPTIONAL]; } // Additional information about a potentially long-running operation with which @@ -208,3 +221,21 @@ message LogEntrySourceLocation { // (Python). string function = 3 [(google.api.field_behavior) = OPTIONAL]; } + +// Additional information used to correlate multiple log entries. Used when a +// single LogEntry would exceed the Google Cloud Logging size limit and is +// split across multiple log entries. +message LogSplit { + // A globally unique identifier for all log entries in a sequence of split log + // entries. All log entries with the same |LogSplit.uid| are assumed to be + // part of the same sequence of split log entries. + string uid = 1; + + // The index of this LogEntry in the sequence of split log entries. Log + // entries are given |index| values 0, 1, ..., n-1 for a sequence of n log + // entries. + int32 index = 2; + + // The total number of log entries that the original LogEntry was split into. + int32 total_splits = 3; +} diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto b/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto index f8b01a71e6..b7f4f189d2 100644 --- a/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto +++ b/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,18 +16,15 @@ syntax = "proto3"; package google.logging.v2; +import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; import "google/api/monitored_resource.proto"; import "google/api/resource.proto"; import "google/logging/v2/log_entry.proto"; -import "google/logging/v2/logging_config.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; -import "google/api/annotations.proto"; option cc_enable_arenas = true; option csharp_namespace = "Google.Cloud.Logging.V2"; @@ -48,10 +45,10 @@ service LoggingServiceV2 { "https://www.googleapis.com/auth/logging.read," "https://www.googleapis.com/auth/logging.write"; - // Deletes all the log entries in a log. The log reappears if it receives new - // entries. Log entries written shortly before the delete operation might not - // be deleted. Entries received after the delete operation with a timestamp - // before the operation will be deleted. + // Deletes all the log entries in a log for the _Default Log Bucket. 
The log + // reappears if it receives new entries. Log entries written shortly before + // the delete operation might not be deleted. Entries received after the + // delete operation with a timestamp before the operation will be deleted. rpc DeleteLog(DeleteLogRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{log_name=projects/*/logs/*}" @@ -140,14 +137,15 @@ service LoggingServiceV2 { message DeleteLogRequest { // Required. The resource name of the log to delete: // - // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // * `projects/[PROJECT_ID]/logs/[LOG_ID]` + // * `organizations/[ORGANIZATION_ID]/logs/[LOG_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]` + // * `folders/[FOLDER_ID]/logs/[LOG_ID]` // // `[LOG_ID]` must be URL-encoded. For example, // `"projects/my-project-id/logs/syslog"`, - // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // `"organizations/123/logs/cloudaudit.googleapis.com%2Factivity"`. + // // For more information about log names, see // [LogEntry][google.logging.v2.LogEntry]. string log_name = 1 [ @@ -163,15 +161,15 @@ message WriteLogEntriesRequest { // Optional. A default log resource name that is assigned to all log entries // in `entries` that do not specify a value for `log_name`: // - // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // * `projects/[PROJECT_ID]/logs/[LOG_ID]` + // * `organizations/[ORGANIZATION_ID]/logs/[LOG_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]` + // * `folders/[FOLDER_ID]/logs/[LOG_ID]` // // `[LOG_ID]` must be URL-encoded. For example: // // "projects/my-project-id/logs/syslog" - // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity" + // "organizations/123/logs/cloudaudit.googleapis.com%2Factivity" // // The permission `logging.logEntries.create` is needed on each project, // organization, billing account, or folder that is receiving new log @@ -214,14 +212,14 @@ message WriteLogEntriesRequest { // the entries later in the list. See the `entries.list` method. // // Log entries with timestamps that are more than the - // [logs retention period](https://cloud.google.com/logging/quota-policy) in + // [logs retention period](https://cloud.google.com/logging/quotas) in // the past or more than 24 hours in the future will not be available when // calling `entries.list`. However, those log entries can still be [exported // with // LogSinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs). // // To improve throughput and to avoid exceeding the - // [quota limit](https://cloud.google.com/logging/quota-policy) for calls to + // [quota limit](https://cloud.google.com/logging/quotas) for calls to // `entries.write`, you should try to include several log entries in this // list, rather than calling this method for each individual log entry. repeated LogEntry entries = 4 [(google.api.field_behavior) = REQUIRED]; @@ -240,7 +238,9 @@ message WriteLogEntriesRequest { } // Result returned from WriteLogEntries. -message WriteLogEntriesResponse {} +message WriteLogEntriesResponse { + +} // Error details for WriteLogEntries with partial success. 
message WriteLogEntriesPartialErrors { @@ -258,16 +258,17 @@ message ListLogEntriesRequest { // Required. Names of one or more parent resources from which to // retrieve log entries: // - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" + // * `projects/[PROJECT_ID]` + // * `organizations/[ORGANIZATION_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]` + // * `folders/[FOLDER_ID]` // - // May alternatively be one or more views - // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] - // organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] - // billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] - // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] + // May alternatively be one or more views: + // + // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` // // Projects listed in the `project_ids` field are added to this list. repeated string resource_names = 8 [ @@ -294,10 +295,10 @@ message ListLogEntriesRequest { // timestamps are returned in order of their `insert_id` values. string order_by = 3 [(google.api.field_behavior) = OPTIONAL]; - // Optional. The maximum number of results to return from this request. - // Default is 50. If the value is negative or exceeds 1000, - // the request is rejected. The presence of `next_page_token` in the - // response indicates that more results might be available. + // Optional. The maximum number of results to return from this request. Default is 50. + // If the value is negative or exceeds 1000, the request is rejected. The + // presence of `next_page_token` in the response indicates that more results + // might be available. int32 page_size = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. If present, then retrieve the next batch of results from the @@ -356,10 +357,10 @@ message ListMonitoredResourceDescriptorsResponse { message ListLogsRequest { // Required. The resource name that owns the logs: // - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" + // * `projects/[PROJECT_ID]` + // * `organizations/[ORGANIZATION_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]` + // * `folders/[FOLDER_ID]` string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -379,17 +380,24 @@ message ListLogsRequest { string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. 
The resource name that owns the logs: - // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] - // organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] - // billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] - // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] + // + // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` // // To support legacy queries, it could also be: - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" - repeated string resource_names = 8 [(google.api.field_behavior) = OPTIONAL]; + // + // * `projects/[PROJECT_ID]` + // * `organizations/[ORGANIZATION_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]` + // * `folders/[FOLDER_ID]` + repeated string resource_names = 8 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/Log" + } + ]; } // Result returned from ListLogs. @@ -409,16 +417,17 @@ message ListLogsResponse { message TailLogEntriesRequest { // Required. Name of a parent resource from which to retrieve log entries: // - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" + // * `projects/[PROJECT_ID]` + // * `organizations/[ORGANIZATION_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]` + // * `folders/[FOLDER_ID]` // // May alternatively be one or more views: - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" - // "organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" + // + // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` repeated string resource_names = 1 [(google.api.field_behavior) = REQUIRED]; // Optional. A filter that chooses which log entries to return. See [Advanced diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto b/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto index 9b10932d63..ef0024063d 100644 --- a/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto +++ b/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,14 +16,14 @@ syntax = "proto3"; package google.logging.v2; +import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; import "google/api/resource.proto"; -import "google/protobuf/duration.proto"; +import "google/longrunning/operations.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; option cc_enable_arenas = true; option csharp_namespace = "Google.Cloud.Logging.V2"; @@ -55,7 +55,7 @@ service ConfigServiceV2 { "https://www.googleapis.com/auth/logging.admin," "https://www.googleapis.com/auth/logging.read"; - // Lists buckets. + // Lists log buckets. rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) { option (google.api.http) = { get: "/v2/{parent=*/*/locations/*}/buckets" @@ -75,7 +75,7 @@ service ConfigServiceV2 { option (google.api.method_signature) = "parent"; } - // Gets a bucket. + // Gets a log bucket. rpc GetBucket(GetBucketRequest) returns (LogBucket) { option (google.api.http) = { get: "/v2/{name=*/*/locations/*/buckets/*}" @@ -94,8 +94,8 @@ service ConfigServiceV2 { }; } - // Creates a bucket that can be used to store log entries. Once a bucket has - // been created, the region cannot be changed. + // Creates a log bucket that can be used to store log entries. After a bucket + // has been created, the bucket's location cannot be changed. rpc CreateBucket(CreateBucketRequest) returns (LogBucket) { option (google.api.http) = { post: "/v2/{parent=*/*/locations/*}/buckets" @@ -119,16 +119,16 @@ service ConfigServiceV2 { }; } - // Updates a bucket. This method replaces the following fields in the + // Updates a log bucket. This method replaces the following fields in the // existing bucket with values from the new bucket: `retention_period` // // If the retention period is decreased and the bucket is locked, - // FAILED_PRECONDITION will be returned. + // `FAILED_PRECONDITION` will be returned. // - // If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION - // will be returned. + // If the bucket has a `lifecycle_state` of `DELETE_REQUESTED`, then + // `FAILED_PRECONDITION` will be returned. // - // A buckets region may not be modified after it is created. + // After a bucket has been created, the bucket's location cannot be changed. rpc UpdateBucket(UpdateBucketRequest) returns (LogBucket) { option (google.api.http) = { patch: "/v2/{name=*/*/locations/*/buckets/*}" @@ -152,10 +152,11 @@ service ConfigServiceV2 { }; } - // Deletes a bucket. - // Moves the bucket to the DELETE_REQUESTED state. After 7 days, the - // bucket will be purged and all logs in the bucket will be permanently - // deleted. + // Deletes a log bucket. + // + // Changes the bucket's `lifecycle_state` to the `DELETE_REQUESTED` state. + // After 7 days, the bucket will be purged and all log entries in the bucket + // will be permanently deleted. rpc DeleteBucket(DeleteBucketRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=*/*/locations/*/buckets/*}" @@ -174,8 +175,8 @@ service ConfigServiceV2 { }; } - // Undeletes a bucket. A bucket that has been deleted may be undeleted within - // the grace period of 7 days. + // Undeletes a log bucket. A bucket that has been deleted can be undeleted + // within the grace period of 7 days. 
rpc UndeleteBucket(UndeleteBucketRequest) returns (google.protobuf.Empty) { option (google.api.http) = { post: "/v2/{name=*/*/locations/*/buckets/*}:undelete" @@ -199,7 +200,7 @@ service ConfigServiceV2 { }; } - // Lists views on a bucket. + // Lists views on a log bucket. rpc ListViews(ListViewsRequest) returns (ListViewsResponse) { option (google.api.http) = { get: "/v2/{parent=*/*/locations/*/buckets/*}/views" @@ -219,7 +220,7 @@ service ConfigServiceV2 { option (google.api.method_signature) = "parent"; } - // Gets a view. + // Gets a view on a log bucket.. rpc GetView(GetViewRequest) returns (LogView) { option (google.api.http) = { get: "/v2/{name=*/*/locations/*/buckets/*/views/*}" @@ -238,8 +239,8 @@ service ConfigServiceV2 { }; } - // Creates a view over logs in a bucket. A bucket may contain a maximum of - // 50 views. + // Creates a view over log entries in a log bucket. A bucket may contain a + // maximum of 30 views. rpc CreateView(CreateViewRequest) returns (LogView) { option (google.api.http) = { post: "/v2/{parent=*/*/locations/*/buckets/*}/views" @@ -263,8 +264,11 @@ service ConfigServiceV2 { }; } - // Updates a view. This method replaces the following fields in the existing - // view with values from the new view: `filter`. + // Updates a view on a log bucket. This method replaces the following fields + // in the existing view with values from the new view: `filter`. + // If an `UNAVAILABLE` error is returned, this indicates that system is not in + // a state where it can update the view. If this occurs, please try again in a + // few minutes. rpc UpdateView(UpdateViewRequest) returns (LogView) { option (google.api.http) = { patch: "/v2/{name=*/*/locations/*/buckets/*/views/*}" @@ -288,7 +292,10 @@ service ConfigServiceV2 { }; } - // Deletes a view from a bucket. + // Deletes a view on a log bucket. + // If an `UNAVAILABLE` error is returned, this indicates that system is not in + // a state where it can delete the view. If this occurs, please try again in a + // few minutes. rpc DeleteView(DeleteViewRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=*/*/locations/*/buckets/*/views/*}" @@ -442,7 +449,7 @@ service ConfigServiceV2 { option (google.api.method_signature) = "sink_name"; } - // Lists all the exclusions in a parent resource. + // Lists all the exclusions on the _Default sink in a parent resource. rpc ListExclusions(ListExclusionsRequest) returns (ListExclusionsResponse) { option (google.api.http) = { get: "/v2/{parent=*/*}/exclusions" @@ -462,7 +469,7 @@ service ConfigServiceV2 { option (google.api.method_signature) = "parent"; } - // Gets the description of an exclusion. + // Gets the description of an exclusion in the _Default sink. rpc GetExclusion(GetExclusionRequest) returns (LogExclusion) { option (google.api.http) = { get: "/v2/{name=*/*/exclusions/*}" @@ -482,9 +489,9 @@ service ConfigServiceV2 { option (google.api.method_signature) = "name"; } - // Creates a new exclusion in a specified parent resource. - // Only log entries belonging to that resource can be excluded. - // You can have up to 10 exclusions in a resource. + // Creates a new exclusion in the _Default sink in a specified parent + // resource. Only log entries belonging to that resource can be excluded. You + // can have up to 10 exclusions in a resource. 
rpc CreateExclusion(CreateExclusionRequest) returns (LogExclusion) { option (google.api.http) = { post: "/v2/{parent=*/*}/exclusions" @@ -509,7 +516,8 @@ service ConfigServiceV2 { option (google.api.method_signature) = "parent,exclusion"; } - // Changes one or more properties of an existing exclusion. + // Changes one or more properties of an existing exclusion in the _Default + // sink. rpc UpdateExclusion(UpdateExclusionRequest) returns (LogExclusion) { option (google.api.http) = { patch: "/v2/{name=*/*/exclusions/*}" @@ -534,7 +542,7 @@ service ConfigServiceV2 { option (google.api.method_signature) = "name,exclusion,update_mask"; } - // Deletes an exclusion. + // Deletes an exclusion in the _Default sink. rpc DeleteExclusion(DeleteExclusionRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=*/*/exclusions/*}" @@ -554,29 +562,39 @@ service ConfigServiceV2 { option (google.api.method_signature) = "name"; } - // Gets the Logs Router CMEK settings for the given resource. + // Gets the Logging CMEK settings for the given resource. // - // Note: CMEK for the Logs Router can currently only be configured for GCP - // organizations. Once configured, it applies to all projects and folders in - // the GCP organization. + // Note: CMEK for the Log Router can be configured for Google Cloud projects, + // folders, organizations and billing accounts. Once configured for an + // organization, it applies to all projects and folders in the Google Cloud + // organization. // - // See [Enabling CMEK for Logs + // See [Enabling CMEK for Log // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) // for more information. rpc GetCmekSettings(GetCmekSettingsRequest) returns (CmekSettings) { option (google.api.http) = { get: "/v2/{name=*/*}/cmekSettings" + additional_bindings { + get: "/v2/{name=projects/*}/cmekSettings" + } additional_bindings { get: "/v2/{name=organizations/*}/cmekSettings" } + additional_bindings { + get: "/v2/{name=folders/*}/cmekSettings" + } + additional_bindings { + get: "/v2/{name=billingAccounts/*}/cmekSettings" + } }; } - // Updates the Logs Router CMEK settings for the given resource. + // Updates the Log Router CMEK settings for the given resource. // - // Note: CMEK for the Logs Router can currently only be configured for GCP - // organizations. Once configured, it applies to all projects and folders in - // the GCP organization. + // Note: CMEK for the Log Router can currently only be configured for Google + // Cloud organizations. Once configured, it applies to all projects and + // folders in the Google Cloud organization. // // [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings] // will fail if 1) `kms_key_name` is invalid, or 2) the associated service @@ -584,7 +602,7 @@ service ConfigServiceV2 { // `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or // 3) access to the key is disabled. // - // See [Enabling CMEK for Logs + // See [Enabling CMEK for Log // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) // for more information. rpc UpdateCmekSettings(UpdateCmekSettingsRequest) returns (CmekSettings) { @@ -597,9 +615,82 @@ service ConfigServiceV2 { } }; } + + // Gets the Log Router settings for the given resource. + // + // Note: Settings for the Log Router can be get for Google Cloud projects, + // folders, organizations and billing accounts. Currently it can only be + // configured for organizations. 
Once configured for an organization, it + // applies to all projects and folders in the Google Cloud organization. + // + // See [Enabling CMEK for Log + // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) + // for more information. + rpc GetSettings(GetSettingsRequest) returns (Settings) { + option (google.api.http) = { + get: "/v2/{name=*/*}/settings" + additional_bindings { + get: "/v2/{name=projects/*}/settings" + } + additional_bindings { + get: "/v2/{name=organizations/*}/settings" + } + additional_bindings { + get: "/v2/{name=folders/*}/settings" + } + additional_bindings { + get: "/v2/{name=billingAccounts/*}/settings" + } + }; + option (google.api.method_signature) = "name"; + } + + // Updates the Log Router settings for the given resource. + // + // Note: Settings for the Log Router can currently only be configured for + // Google Cloud organizations. Once configured, it applies to all projects and + // folders in the Google Cloud organization. + // + // [UpdateSettings][google.logging.v2.ConfigServiceV2.UpdateSettings] + // will fail if 1) `kms_key_name` is invalid, or 2) the associated service + // account does not have the required + // `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or + // 3) access to the key is disabled. 4) `location_id` is not supported by + // Logging. 5) `location_id` violate OrgPolicy. + // + // See [Enabling CMEK for Log + // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) + // for more information. + rpc UpdateSettings(UpdateSettingsRequest) returns (Settings) { + option (google.api.http) = { + patch: "/v2/{name=*/*}/settings" + body: "settings" + additional_bindings { + patch: "/v2/{name=organizations/*}/settings" + body: "settings" + } + additional_bindings { + patch: "/v2/{name=folders/*}/settings" + body: "settings" + } + }; + option (google.api.method_signature) = "settings,update_mask"; + } + + // Copies a set of log entries from a log bucket to a Cloud Storage bucket. + rpc CopyLogEntries(CopyLogEntriesRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/entries:copy" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "CopyLogEntriesResponse" + metadata_type: "CopyLogEntriesMetadata" + }; + } } -// Describes a repository of logs. +// Describes a repository in which log entries are stored. message LogBucket { option (google.api.resource) = { type: "logging.googleapis.com/LogBucket" @@ -609,16 +700,20 @@ message LogBucket { pattern: "billingAccounts/{billing_account}/locations/{location}/buckets/{bucket}" }; - // The resource name of the bucket. + // Output only. The resource name of the bucket. + // // For example: - // "projects/my-project-id/locations/my-location/buckets/my-bucket-id The - // supported locations are: - // "global" // - // For the location of `global` it is unspecified where logs are actually - // stored. - // Once a bucket has been created, the location can not be changed. - string name = 1; + // `projects/my-project/locations/global/buckets/my-bucket` + // + // For a list of supported locations, see [Supported + // Regions](https://cloud.google.com/logging/docs/region-support) + // + // For the location of `global` it is unspecified where log entries are + // actually stored. + // + // After a bucket has been created, the location cannot be changed. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Describes this bucket. 
string description = 3; @@ -631,34 +726,38 @@ message LogBucket { google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; // Logs will be retained by default for this amount of time, after which they - // will automatically be deleted. The minimum retention period is 1 day. - // If this value is set to zero at bucket creation time, the default time of - // 30 days will be used. + // will automatically be deleted. The minimum retention period is 1 day. If + // this value is set to zero at bucket creation time, the default time of 30 + // days will be used. int32 retention_days = 11; - // Whether the bucket has been locked. - // The retention period on a locked bucket may not be changed. - // Locked buckets may only be deleted if they are empty. + // Whether the bucket is locked. + // + // The retention period on a locked bucket cannot be changed. Locked buckets + // may only be deleted if they are empty. bool locked = 9; // Output only. The bucket lifecycle state. LifecycleState lifecycle_state = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; -} -// LogBucket lifecycle states. -enum LifecycleState { - // Unspecified state. This is only used/useful for distinguishing - // unset values. - LIFECYCLE_STATE_UNSPECIFIED = 0; - - // The normal and active state. - ACTIVE = 1; + // Log entry field paths that are denied access in this bucket. + // + // The following fields and their children are eligible: `textPayload`, + // `jsonPayload`, `protoPayload`, `httpRequest`, `labels`, `sourceLocation`. + // + // Restricting a repeated field will restrict all values. Adding a parent will + // block all child fields. (e.g. `foo.bar` will block `foo.bar.baz`) + repeated string restricted_fields = 15; - // The bucket has been marked for deletion by the user. - DELETE_REQUESTED = 2; + // The CMEK settings of the log bucket. If present, new log entries written to + // this log bucket are encrypted using the CMEK key provided in this + // configuration. If a log bucket has CMEK settings, the CMEK settings cannot + // be disabled later by updating the log bucket. Changing the KMS key is + // allowed. + CmekSettings cmek_settings = 19; } -// Describes a view over logs in a bucket. +// Describes a view over log entries in a bucket. message LogView { option (google.api.resource) = { type: "logging.googleapis.com/LogView" @@ -669,8 +768,10 @@ message LogView { }; // The resource name of the view. - // For example - // "projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view + // + // For example: + // + // `projects/my-project/locations/global/buckets/my-bucket/views/my-view` string name = 1; // Describes this view. @@ -683,21 +784,27 @@ message LogView { google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; // Filter that restricts which log entries in a bucket are visible in this - // view. Filters are restricted to be a logical AND of ==/!= of any of the + // view. + // + // Filters are restricted to be a logical AND of ==/!= of any of the // following: - // originating project/folder/organization/billing account. - // resource type - // log id - // Example: SOURCE("projects/myproject") AND resource.type = "gce_instance" - // AND LOG_ID("stdout") + // + // - originating project/folder/organization/billing account. 
+ // - resource type + // - log id + // + // For example: + // + // SOURCE("projects/myproject") AND resource.type = "gce_instance" + // AND LOG_ID("stdout") string filter = 7; } // Describes a sink used to export log entries to one of the following -// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a -// Cloud Pub/Sub topic. A logs filter controls which log entries are exported. -// The sink must be created within a project, organization, billing account, or -// folder. +// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, a +// Pub/Sub topic or a Cloud Logging log bucket. A logs filter controls which log +// entries are exported. The sink must be created within a project, +// organization, billing account, or folder. message LogSink { option (google.api.resource) = { type: "logging.googleapis.com/LogSink" @@ -719,9 +826,10 @@ message LogSink { V1 = 2; } - // Required. The client-assigned sink identifier, unique within the project. Example: - // `"my-syslog-errors-to-pubsub"`. Sink identifiers are limited to 100 - // characters and can include only the following characters: upper and + // Required. The client-assigned sink identifier, unique within the project. + // + // For example: `"my-syslog-errors-to-pubsub"`. Sink identifiers are limited + // to 100 characters and can include only the following characters: upper and // lower-case alphanumeric characters, underscores, hyphens, and periods. // First character has to be alphanumeric. string name = 1 [(google.api.field_behavior) = REQUIRED]; @@ -732,9 +840,9 @@ message LogSink { // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" // - // The sink's `writer_identity`, set when the sink is created, must - // have permission to write to the destination or else the log - // entries are not exported. For more information, see + // The sink's `writer_identity`, set when the sink is created, must have + // permission to write to the destination or else the log entries are not + // exported. For more information, see // [Exporting Logs with // Sinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs). string destination = 3 [ @@ -747,20 +855,24 @@ message LogSink { // Optional. An [advanced logs // filter](https://cloud.google.com/logging/docs/view/advanced-queries). The // only exported log entries are those that are in the resource owning the - // sink and that match the filter. For example: + // sink and that match the filter. + // + // For example: // - // logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR + // `logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR` string filter = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. A description of this sink. + // // The maximum length of the description is 8000 characters. string description = 18 [(google.api.field_behavior) = OPTIONAL]; - // Optional. If set to True, then this sink is disabled and it does not - // export any log entries. + // Optional. If set to true, then this sink is disabled and it does not export any log + // entries. bool disabled = 19 [(google.api.field_behavior) = OPTIONAL]; - // Optional. Log entries that match any of the exclusion filters will not be exported. + // Optional. Log entries that match any of these exclusion filters will not be exported. + // // If a log entry is matched by both `filter` and one of `exclusion_filters` // it will not be exported. 
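The sink-identifier constraints documented above are mechanical enough to check client-side before issuing a create call. A small sketch of those rules (hypothetical helper, not part of this patch; the service performs the real validation):

// Documented rules: at most 100 characters; only alphanumerics, underscores,
// hyphens, and periods; first character alphanumeric.
fn is_valid_sink_id(id: &str) -> bool {
    let first_ok = id
        .chars()
        .next()
        .map_or(false, |c| c.is_ascii_alphanumeric());
    first_ok
        && id.len() <= 100
        && id
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || matches!(c, '_' | '-' | '.'))
}

// is_valid_sink_id("my-syslog-errors-to-pubsub") == true
// is_valid_sink_id("-bad-first-char") == false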
repeated LogExclusion exclusions = 16 [(google.api.field_behavior) = OPTIONAL]; @@ -768,33 +880,42 @@ message LogSink { // Deprecated. This field is unused. VersionFormat output_version_format = 6 [deprecated = true]; - // Output only. An IAM identity—a service account or group—under which Logging - // writes the exported log entries to the sink's destination. This field is - // set by [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] and + // Output only. An IAM identity—a service account or group—under which Cloud + // Logging writes the exported log entries to the sink's destination. This + // field is set by + // [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] and // [sinks.update][google.logging.v2.ConfigServiceV2.UpdateSink] based on the // value of `unique_writer_identity` in those methods. // // Until you grant this identity write-access to the destination, log entry - // exports from this sink will fail. For more information, - // see [Granting Access for a + // exports from this sink will fail. For more information, see [Granting + // Access for a // Resource](https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). // Consult the destination service's documentation to determine the // appropriate IAM roles to assign to the identity. + // + // Sinks that have a destination that is a log bucket in the same project as + // the sink do not have a writer_identity and no additional permissions are + // required. string writer_identity = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Optional. This field applies only to sinks owned by organizations and - // folders. If the field is false, the default, only the logs owned by the - // sink's parent resource are available for export. If the field is true, then - // logs from all the projects, folders, and billing accounts contained in the + // Optional. This field applies only to sinks owned by organizations and folders. If the + // field is false, the default, only the logs owned by the sink's parent + // resource are available for export. If the field is true, then log entries + // from all the projects, folders, and billing accounts contained in the // sink's parent resource are also available for export. Whether a particular // log entry from the children is exported depends on the sink's filter - // expression. For example, if this field is true, then the filter + // expression. + // + // For example, if this field is true, then the filter // `resource.type=gce_instance` would export all Compute Engine VM instance - // log entries from all projects in the sink's parent. To only export entries - // from certain child projects, filter on the project part of the log name: + // log entries from all projects in the sink's parent. + // + // To only export entries from certain child projects, filter on the project + // part of the log name: // - // logName:("projects/test-project1/" OR "projects/test-project2/") AND - // resource.type=gce_instance + // logName:("projects/test-project1/" OR "projects/test-project2/") AND + // resource.type=gce_instance bool include_children = 9 [(google.api.field_behavior) = OPTIONAL]; // Destination dependent options. @@ -818,16 +939,17 @@ message LogSink { message BigQueryOptions { // Optional. Whether to use [BigQuery's partition // tables](https://cloud.google.com/bigquery/docs/partitioned-tables). By - // default, Logging creates dated tables based on the log entries' timestamps, - // e.g. syslog_20170523. 
With partitioned tables the date suffix is no longer - // present and [special query + // default, Cloud Logging creates dated tables based on the log entries' + // timestamps, e.g. syslog_20170523. With partitioned tables the date suffix + // is no longer present and [special query // syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) // has to be used instead. In both cases, tables are sharded based on UTC // timezone. bool use_partitioned_tables = 1 [(google.api.field_behavior) = OPTIONAL]; - // Output only. True if new timestamp column based partitioning is in use, - // false if legacy ingestion-time partitioning is in use. + // Output only. True if new timestamp column based partitioning is in use, false if legacy + // ingestion-time partitioning is in use. + // // All new sinks will have this field set true and will use timestamp column // based partitioning. If use_partitioned_tables is false, this value has no // meaning and will be false. Legacy sinks using partitioned tables will have @@ -854,15 +976,15 @@ message ListBucketsRequest { } ]; - // Optional. If present, then retrieve the next batch of results from the - // preceding call to this method. `pageToken` must be the value of - // `nextPageToken` from the previous response. The values of other method - // parameters should be identical to those in the previous call. + // Optional. If present, then retrieve the next batch of results from the preceding call + // to this method. `pageToken` must be the value of `nextPageToken` from the + // previous response. The values of other method parameters should be + // identical to those in the previous call. string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; - // Optional. The maximum number of results to return from this request. - // Non-positive values are ignored. The presence of `nextPageToken` in the - // response indicates that more results might be available. + // Optional. The maximum number of results to return from this request. Non-positive + // values are ignored. The presence of `nextPageToken` in the response + // indicates that more results might be available. int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; } @@ -879,11 +1001,13 @@ message ListBucketsResponse { // The parameters to `CreateBucket`. message CreateBucketRequest { - // Required. The resource in which to create the bucket: + // Required. The resource in which to create the log bucket: // // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" // - // Example: `"projects/my-logging-project/locations/global"` + // For example: + // + // `"projects/my-project/locations/global"` string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -891,9 +1015,9 @@ message CreateBucketRequest { } ]; - // Required. A client-assigned identifier such as `"my-bucket"`. Identifiers are - // limited to 100 characters and can include only letters, digits, - // underscores, hyphens, and periods. + // Required. A client-assigned identifier such as `"my-bucket"`. Identifiers are limited + // to 100 characters and can include only letters, digits, underscores, + // hyphens, and periods. string bucket_id = 2 [(google.api.field_behavior) = REQUIRED]; // Required. The new bucket. 
The region specified in the new bucket must be compliant @@ -911,10 +1035,9 @@ message UpdateBucketRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // - // Example: - // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. Also - // requires permission "resourcemanager.projects.updateLiens" to set the - // locked property + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` string name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -926,13 +1049,13 @@ message UpdateBucketRequest { LogBucket bucket = 2 [(google.api.field_behavior) = REQUIRED]; // Required. Field mask that specifies the fields in `bucket` that need an update. A - // bucket field will be overwritten if, and only if, it is in the update - // mask. `name` and output only fields cannot be updated. + // bucket field will be overwritten if, and only if, it is in the update mask. + // `name` and output only fields cannot be updated. // - // For a detailed `FieldMask` definition, see + // For a detailed `FieldMask` definition, see: // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask // - // Example: `updateMask=retention_days`. + // For example: `updateMask=retention_days` google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = REQUIRED]; } @@ -945,8 +1068,9 @@ message GetBucketRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // - // Example: - // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` string name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -964,8 +1088,9 @@ message DeleteBucketRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // - // Example: - // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` string name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -983,8 +1108,9 @@ message UndeleteBucketRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // - // Example: - // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` string name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1000,13 +1126,14 @@ message ListViewsRequest { // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" string parent = 1 [(google.api.field_behavior) = REQUIRED]; - // Optional. If present, then retrieve the next batch of results from the - // preceding call to this method. `pageToken` must be the value of - // `nextPageToken` from the previous response. The values of other method - // parameters should be identical to those in the previous call. + // Optional. If present, then retrieve the next batch of results from the preceding call + // to this method. 
`pageToken` must be the value of `nextPageToken` from the + // previous response. The values of other method parameters should be + // identical to those in the previous call. string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. The maximum number of results to return from this request. + // // Non-positive values are ignored. The presence of `nextPageToken` in the // response indicates that more results might be available. int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; @@ -1027,10 +1154,11 @@ message ListViewsResponse { message CreateViewRequest { // Required. The bucket in which to create the view // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // `"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"` + // + // For example: // - // Example: - // `"projects/my-logging-project/locations/my-location/buckets/my-bucket"` + // `"projects/my-project/locations/global/buckets/my-bucket"` string parent = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The id to use for this view. @@ -1046,8 +1174,9 @@ message UpdateViewRequest { // // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" // - // Example: - // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id"`. + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` string name = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The updated view. @@ -1060,7 +1189,7 @@ message UpdateViewRequest { // For a detailed `FieldMask` definition, see // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask // - // Example: `updateMask=filter`. + // For example: `updateMask=filter` google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = OPTIONAL]; } @@ -1070,8 +1199,9 @@ message GetViewRequest { // // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" // - // Example: - // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id"`. + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` string name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1086,8 +1216,9 @@ message DeleteViewRequest { // // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" // - // Example: - // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id"`. + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` string name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1143,7 +1274,9 @@ message GetSinkRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" // "folders/[FOLDER_ID]/sinks/[SINK_ID]" // - // Example: `"projects/my-project-id/sinks/my-sink-id"`. + // For example: + // + // `"projects/my-project/sinks/my-sink"` string sink_name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1161,7 +1294,10 @@ message CreateSinkRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]" // "folders/[FOLDER_ID]" // - // Examples: `"projects/my-logging-project"`, `"organizations/123456789"`. 
+ // For example: + // + // `"projects/my-project"` + // `"organizations/123456789"` string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1176,9 +1312,9 @@ message CreateSinkRequest { // Optional. Determines the kind of IAM identity returned as `writer_identity` // in the new sink. If this value is omitted or set to false, and if the // sink's parent is a project, then the value returned as `writer_identity` is - // the same group or service account used by Logging before the addition of - // writer identities to this API. The sink's destination must be in the same - // project as the sink itself. + // the same group or service account used by Cloud Logging before the addition + // of writer identities to this API. The sink's destination must be in the + // same project as the sink itself. // // If this field is set to true, or if the sink is owned by a non-project // resource such as an organization, then the value of `writer_identity` will @@ -1197,7 +1333,9 @@ message UpdateSinkRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" // "folders/[FOLDER_ID]/sinks/[SINK_ID]" // - // Example: `"projects/my-project-id/sinks/my-sink-id"`. + // For example: + // + // `"projects/my-project/sinks/my-sink"` string sink_name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1226,16 +1364,18 @@ message UpdateSinkRequest { // an update. A sink field will be overwritten if, and only if, it is // in the update mask. `name` and output only fields cannot be updated. // - // An empty updateMask is temporarily treated as using the following mask + // An empty `updateMask` is temporarily treated as using the following mask // for backwards compatibility purposes: - // destination,filter,includeChildren + // + // `destination,filter,includeChildren` + // // At some point in the future, this behavior will be removed and specifying an - // empty updateMask will be an error. + // empty `updateMask` will be an error. // // For a detailed `FieldMask` definition, see // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask // - // Example: `updateMask=filter`. + // For example: `updateMask=filter` google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = OPTIONAL]; } @@ -1249,7 +1389,9 @@ message DeleteSinkRequest { // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" // "folders/[FOLDER_ID]/sinks/[SINK_ID]" // - // Example: `"projects/my-project-id/sinks/my-sink-id"`. + // For example: + // + // `"projects/my-project/sinks/my-sink"` string sink_name = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1258,12 +1400,11 @@ ]; }
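Since `update_mask` recurs through these request messages: a caller selects exactly which sink fields to overwrite by listing their paths, and an empty mask currently falls back to the legacy `destination,filter,includeChildren` behavior described above. A minimal sketch using `prost_types::FieldMask`, the mask type the generated code uses; the helper name is illustrative:

use prost_types::FieldMask;

// Update only the sink's filter, leaving destination and other fields
// untouched.
fn filter_only_mask() -> FieldMask {
    FieldMask {
        paths: vec!["filter".to_string()],
    }
}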
-// Specifies a set of log entries that are not to be stored in -// Logging. If your GCP resource receives a large volume of logs, you can -// use exclusions to reduce your chargeable logs. Exclusions are -// processed after log sinks, so you can export log entries before they are -// excluded. Note that organization-level and folder-level exclusions don't -// apply to child resources, and that you can't exclude audit log entries. +// Specifies a set of log entries that are filtered out by a sink. If +// your Google Cloud resource receives a large volume of log entries, you can +// use exclusions to reduce your chargeable logs. Note that exclusions on +// organization-level and folder-level sinks don't apply to child resources. +// Note also that you cannot modify the _Required sink or exclude logs from it. message LogExclusion { option (google.api.resource) = { type: "logging.googleapis.com/LogExclusion" @@ -1287,10 +1428,11 @@ message LogExclusion { // matches the log entries to be excluded. By using the [sample // function](https://cloud.google.com/logging/docs/view/advanced-queries#sample), // you can exclude less than 100% of the matching log entries. - // For example, the following query matches 99% of low-severity log - // entries from Google Cloud Storage buckets: // - // `"resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)"` diff --git a/opentelemetry-stackdriver/src/lib.rs b/opentelemetry-stackdriver/src/lib.rs - fn export(&mut self, batch: Vec<SpanData>) -> ExportResult { + fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> { match self.tx.try_send(batch) { - Err(e) => Err(e.into()), + Err(e) => Box::pin(std::future::ready(Err(e.into()))), Ok(()) => { self.pending_count.fetch_add(1, Ordering::Relaxed); - Ok(()) + Box::pin(std::future::ready(Ok(()))) } } }
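The hunk above changes the exporter's `export` to return a future instead of a plain result. When there is no I/O to await, the pattern is to wrap the ready value in `Box::pin(std::future::ready(..))`, exactly as the patch does. A stripped-down sketch; the struct is hypothetical and the type paths are assumed from this crate's imports:

use futures::future::BoxFuture;
use opentelemetry::sdk::export::trace::{ExportResult, SpanData};

struct NoopExporter;

impl NoopExporter {
    // Same shape as the new export signature above: the result is returned
    // as an immediately-ready boxed future.
    fn export(&mut self, _batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
        Box::pin(std::future::ready(Ok(())))
    }
}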
@@ -419,7 +422,7 @@ impl Authorizer for YupAuthorizer { req.metadata_mut().insert( "authorization", - MetadataValue::from_str(&format!("Bearer {}", token.as_str())).unwrap(), + MetadataValue::try_from(format!("Bearer {}", token.as_str())).unwrap(), ); Ok(()) } @@ -472,7 +475,7 @@ impl Authorizer for GcpAuthorizer { req.metadata_mut().insert( "authorization", - MetadataValue::from_str(&format!("Bearer {}", token.as_str())).unwrap(), + MetadataValue::try_from(format!("Bearer {}", token.as_str())).unwrap(), ); Ok(()) @@ -498,7 +501,7 @@ impl From<Value> for AttributeValue { Value::Bool(v) => attribute_value::Value::BoolValue(v), Value::F64(v) => attribute_value::Value::StringValue(to_truncate(v.to_string())), Value::I64(v) => attribute_value::Value::IntValue(v), - Value::String(v) => attribute_value::Value::StringValue(to_truncate(v.into_owned())), + Value::String(v) => attribute_value::Value::StringValue(to_truncate(v.to_string())), Value::Array(_) => attribute_value::Value::StringValue(to_truncate(v.to_string())), }; AttributeValue { @@ -650,20 +653,6 @@ pub enum MonitoredResource { }, } -const TRACE_APPEND: &str = "https://www.googleapis.com/auth/trace.append"; -const LOGGING_WRITE: &str = "https://www.googleapis.com/auth/logging.write"; -const HTTP_PATH_ATTRIBUTE: &str = "http.path"; - -const GCP_HTTP_HOST: &str = "/http/host"; -const GCP_HTTP_METHOD: &str = "/http/method"; -const GCP_HTTP_TARGET: &str = "/http/path"; -const GCP_HTTP_URL: &str = "/http/url"; -const GCP_HTTP_USER_AGENT: &str = "/http/user_agent"; -const GCP_HTTP_STATUS_CODE: &str = "/http/status_code"; -const GCP_HTTP_ROUTE: &str = "/http/route"; -const GCP_HTTP_PATH: &str = "/http/path"; -const GCP_SERVICE_NAME: &str = "g.co/gae/app/module"; - impl From<EvictedHashMap> for Attributes { fn from(attributes: EvictedHashMap) -> Self { let mut dropped_attributes_count: i32 = 0; @@ -676,42 +665,18 @@ impl From<EvictedHashMap> for Attributes { return None; } - if semcov::trace::HTTP_HOST == k { - return Some((GCP_HTTP_HOST.to_owned(), v.into())); - } - - if semcov::trace::HTTP_METHOD == k { - return Some((GCP_HTTP_METHOD.to_owned(), v.into())); - } - - if semcov::trace::HTTP_TARGET == k { - return Some((GCP_HTTP_TARGET.to_owned(), v.into())); - } - - if semcov::trace::HTTP_URL == k { - return Some((GCP_HTTP_URL.to_owned(), v.into())); - } - - if semcov::trace::HTTP_USER_AGENT == k { - return Some((GCP_HTTP_USER_AGENT.to_owned(), v.into())); - } - - if semcov::trace::HTTP_STATUS_CODE == k { - return Some((GCP_HTTP_STATUS_CODE.to_owned(), v.into())); - } - - if semcov::trace::HTTP_ROUTE == k { - return Some((GCP_HTTP_ROUTE.to_owned(), v.into())); - }; - - if semcov::resource::SERVICE_NAME == k { + if k == SERVICE_NAME { return Some((GCP_SERVICE_NAME.to_owned(), v.into())); - }; - - if HTTP_PATH_ATTRIBUTE == key { + } else if key == HTTP_PATH_ATTRIBUTE { return Some((GCP_HTTP_PATH.to_owned(), v.into())); } + for (otel_key, gcp_key) in KEY_MAP { + if otel_key == &k { + return Some((gcp_key.to_owned(), v.into())); + } + } + Some((key.to_owned(), v.into())) }) .collect(); @@ -722,6 +687,23 @@ impl From<EvictedHashMap> for Attributes { } } +// Map conventional OpenTelemetry keys to their GCP counterparts. +const KEY_MAP: [(&Key, &str); 7] = [ + (&HTTP_HOST, "/http/host"), + (&HTTP_METHOD, "/http/method"), + (&HTTP_TARGET, "/http/path"), + (&HTTP_URL, "/http/url"), + (&HTTP_USER_AGENT, "/http/user_agent"), + (&HTTP_STATUS_CODE, "/http/status_code"), + (&HTTP_ROUTE, "/http/route"), +]; + +const TRACE_APPEND: &str = "https://www.googleapis.com/auth/trace.append"; +const LOGGING_WRITE: &str = "https://www.googleapis.com/auth/logging.write"; +const HTTP_PATH_ATTRIBUTE: &str = "http.path"; +const GCP_HTTP_PATH: &str = "/http/path"; +const GCP_SERVICE_NAME: &str = "g.co/gae/app/module";
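With the table in place, the chain of if-blocks removed above reduces to a linear scan over seven entries. A usage sketch, assuming the same `Key` type and semantic-convention constants this module imports:

// Resolve the GCP attribute key for an OpenTelemetry key, returning None
// for keys that pass through unchanged.
fn gcp_key_for(k: &Key) -> Option<&'static str> {
    KEY_MAP
        .iter()
        .find(|(otel_key, _)| *otel_key == k)
        .map(|(_, gcp_key)| *gcp_key)
}

// gcp_key_for(&HTTP_METHOD) == Some("/http/method")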
#[cfg(test)] mod tests { use super::*; diff --git a/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs b/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs index ff77d7ea2d..f79f99d63c 100644 --- a/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs +++ b/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs @@ -391,21 +391,21 @@ pub struct BatchWriteSpansRequest { #[prost(message, repeated, tag = "2")] pub spans: ::prost::alloc::vec::Vec<Span>, } -#[doc = r" Generated client implementations."] +/// Generated client implementations. pub mod trace_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; - #[doc = " This file describes an API for collecting and viewing traces and spans"] - #[doc = " within a trace. A Trace is a collection of spans corresponding to a single"] - #[doc = " operation or set of operations for an application. A span is an individual"] - #[doc = " timed event which forms a node of the trace tree. A single trace may"] - #[doc = " contain span(s) from multiple services."] + /// This file describes an API for collecting and viewing traces and spans + /// within a trace. A Trace is a collection of spans corresponding to a single + /// operation or set of operations for an application. A span is an individual + /// timed event which forms a node of the trace tree. A single trace may + /// contain span(s) from multiple services. #[derive(Debug, Clone)] pub struct TraceServiceClient<T> { inner: tonic::client::Grpc<T>, } impl TraceServiceClient<tonic::transport::Channel> { - #[doc = r" Attempt to create a new client by connecting to a given endpoint."] + /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error> where D: std::convert::TryInto<tonic::transport::Endpoint>, @@ -418,8 +418,8 @@ pub mod trace_service_client { impl<T> TraceServiceClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, - T::ResponseBody: Body + Send + 'static, T::Error: Into<StdError>, + T::ResponseBody: Body + Send + 'static, <T::ResponseBody as Body>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { @@ -432,6 +432,7 @@ pub mod trace_service_client { ) -> TraceServiceClient<InterceptedService<T, F>> where F: tonic::service::Interceptor, + T::ResponseBody: Default, T: tonic::codegen::Service< http::Request<tonic::body::BoxBody>, Response = http::Response<<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody>, >, { TraceServiceClient::new(InterceptedService::new(inner, interceptor)) } - #[doc = r" Compress requests with `gzip`."] - #[doc = r""] - #[doc = r" This requires the server to support it otherwise it might respond with an"] - #[doc = r" error."] + /// Compress requests with `gzip`. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } - #[doc = r" Enable decompressing responses with `gzip`."] + /// Enable decompressing responses with `gzip`. + #[must_use] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } - #[doc = " Sends new spans to new or existing traces. You cannot update"] - #[doc = " existing spans."] + /// Sends new spans to new or existing traces. You cannot update + /// existing spans. pub async fn batch_write_spans( &mut self, request: impl tonic::IntoRequest<super::BatchWriteSpansRequest>, @@ -474,7 +477,7 @@ ); self.inner.unary(request.into_request(), path, codec).await } - #[doc = " Creates a new span."] + /// Creates a new span. pub async fn create_span( &mut self, request: impl tonic::IntoRequest<super::Span>,
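For context on the regenerated client above, a usage sketch. The endpoint, project name, and `proto` module path are assumptions, and a real call needs Google auth metadata attached (for example via `with_interceptor`):

use opentelemetry_stackdriver::proto::devtools::cloudtrace::v2::{
    trace_service_client::TraceServiceClient, BatchWriteSpansRequest,
};

async fn write_spans() -> Result<(), Box<dyn std::error::Error>> {
    // connect/send_gzip/accept_gzip/batch_write_spans are the generated
    // methods shown in the diff above.
    let mut client = TraceServiceClient::connect("https://cloudtrace.googleapis.com")
        .await?
        .send_gzip()
        .accept_gzip();
    client
        .batch_write_spans(BatchWriteSpansRequest {
            name: "projects/my-project".to_string(),
            spans: vec![],
        })
        .await?;
    Ok(())
}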
diff --git a/opentelemetry-stackdriver/src/proto/logging/v2.rs b/opentelemetry-stackdriver/src/proto/logging/v2.rs index 396575bd80..6cd2e843ea 100644 --- a/opentelemetry-stackdriver/src/proto/logging/v2.rs +++ b/opentelemetry-stackdriver/src/proto/logging/v2.rs @@ -1,6 +1,4 @@ /// An individual entry in a log. -/// -/// #[derive(Clone, PartialEq, ::prost::Message)] pub struct LogEntry { /// Required. The resource name of the log to which this log entry belongs: @@ -16,1542 +14,213 @@ pub struct LogEntry { /// /// `\[LOG_ID\]` must be URL-encoded within `log_name`. Example: /// `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. - /// `\[LOG_ID\]` must be less than 512 characters long and can only include the - /// following characters: upper and lower case alphanumeric characters, - /// forward-slash, underscore, hyphen, and period. - /// - /// For backward compatibility, if `log_name` begins with a forward-slash, such - /// as `/projects/...`, then the log entry is ingested as usual but the - /// forward-slash is removed. Listing the log entry will not show the leading - /// slash and filtering for a log name with a leading slash will never return - /// any results. - #[prost(string, tag = "12")] - pub log_name: ::prost::alloc::string::String, - /// Required. The monitored resource that produced this log entry. - /// - /// Example: a log entry that reports a database error would be associated with - /// the monitored resource designating the particular database that reported - /// the error. - #[prost(message, optional, tag = "8")] - pub resource: ::core::option::Option<super::super::api::MonitoredResource>, - /// Optional. The time the event described by the log entry occurred. This time is used - /// to compute the log entry's age and to enforce the logs retention period. - /// If this field is omitted in a new log entry, then Logging assigns it the - /// current time. Timestamps have nanosecond accuracy, but trailing zeros in - /// the fractional seconds might be omitted when the timestamp is displayed. - /// - /// Incoming log entries must have timestamps that don't exceed the - /// [logs retention - /// period](https://cloud.google.com/logging/quotas#logs_retention_periods) in - /// the past, and that don't exceed 24 hours in the future. Log entries outside - /// those time boundaries aren't ingested by Logging. - #[prost(message, optional, tag = "9")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The time the log entry was received by Logging. - #[prost(message, optional, tag = "24")] - pub receive_timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Optional. The severity of the log entry. The default value is `LogSeverity.DEFAULT`. - #[prost(enumeration = "super::r#type::LogSeverity", tag = "10")] - pub severity: i32, - /// Optional. A unique identifier for the log entry. If you provide a value, then - /// Logging considers other log entries in the same project, with the same - /// `timestamp`, and with the same `insert_id` to be duplicates which are - /// removed in a single query result. However, there are no guarantees of - /// de-duplication in the export of logs. - /// - /// If the `insert_id` is omitted when writing a log entry, the Logging API - /// assigns its own unique identifier in this field. - /// - /// In queries, the `insert_id` is also used to order log entries that have - /// the same `log_name` and `timestamp` values. - #[prost(string, tag = "4")] - pub insert_id: ::prost::alloc::string::String, - /// Optional. Information about the HTTP request associated with this log entry, if - /// applicable. - #[prost(message, optional, tag = "7")] - pub http_request: ::core::option::Option<super::r#type::HttpRequest>, - /// Optional. A set of user-defined (key, value) data that provides additional - /// information about the log entry. - #[prost(map = "string, string", tag = "11")] - pub labels: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// Optional. Information about an operation associated with the log entry, if - /// applicable. - #[prost(message, optional, tag = "15")] - pub operation: ::core::option::Option<LogEntryOperation>, - /// Optional. Resource name of the trace associated with the log entry, if any. If it - /// contains a relative resource name, the name is assumed to be relative to - /// `//tracing.googleapis.com`. Example: - /// `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824` - #[prost(string, tag = "22")] - pub trace: ::prost::alloc::string::String, - /// Optional. The span ID within the trace associated with the log entry. - /// - /// For Trace spans, this is the same format that the Trace API v2 uses: a - /// 16-character hexadecimal encoding of an 8-byte array, such as - /// `000000000000004a`. - #[prost(string, tag = "27")] - pub span_id: ::prost::alloc::string::String, - /// Optional. The sampling decision of the trace associated with the log entry. - /// - /// True means that the trace resource name in the `trace` field was sampled - /// for storage in a trace backend. False means that the trace was not sampled - /// for storage when this log entry was written, or the sampling decision was - /// unknown at the time. A non-sampled `trace` value is still useful as a - /// request correlation identifier.
The default is False. - #[prost(bool, tag = "30")] - pub trace_sampled: bool, - /// Optional. Source code location information associated with the log entry, if any. - #[prost(message, optional, tag = "23")] - pub source_location: ::core::option::Option, - /// The log entry payload, which can be one of multiple types. - #[prost(oneof = "log_entry::Payload", tags = "2, 3, 6")] - pub payload: ::core::option::Option, -} -/// Nested message and enum types in `LogEntry`. -pub mod log_entry { - /// The log entry payload, which can be one of multiple types. - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Payload { - /// The log entry payload, represented as a protocol buffer. Some Google - /// Cloud Platform services use this field for their log entry payloads. - /// - /// The following protocol buffer types are supported; user-defined types - /// are not supported: - /// - /// "type.googleapis.com/google.cloud.audit.AuditLog" - /// "type.googleapis.com/google.appengine.logging.v1.RequestLog" - #[prost(message, tag = "2")] - ProtoPayload(::prost_types::Any), - /// The log entry payload, represented as a Unicode string (UTF-8). - #[prost(string, tag = "3")] - TextPayload(::prost::alloc::string::String), - /// The log entry payload, represented as a structure that is - /// expressed as a JSON object. - #[prost(message, tag = "6")] - JsonPayload(::prost_types::Struct), - } -} -/// Additional information about a potentially long-running operation with which -/// a log entry is associated. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogEntryOperation { - /// Optional. An arbitrary operation identifier. Log entries with the same - /// identifier are assumed to be part of the same operation. - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Optional. An arbitrary producer identifier. The combination of `id` and - /// `producer` must be globally unique. Examples for `producer`: - /// `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`. - #[prost(string, tag = "2")] - pub producer: ::prost::alloc::string::String, - /// Optional. Set this to True if this is the first log entry in the operation. - #[prost(bool, tag = "3")] - pub first: bool, - /// Optional. Set this to True if this is the last log entry in the operation. - #[prost(bool, tag = "4")] - pub last: bool, -} -/// Additional information about the source code location that produced the log -/// entry. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogEntrySourceLocation { - /// Optional. Source file name. Depending on the runtime environment, this - /// might be a simple name or a fully-qualified name. - #[prost(string, tag = "1")] - pub file: ::prost::alloc::string::String, - /// Optional. Line within the source file. 1-based; 0 indicates no line number - /// available. - #[prost(int64, tag = "2")] - pub line: i64, - /// Optional. Human-readable name of the function or method being invoked, with - /// optional context such as the class or package name. This information may be - /// used in contexts such as the logs viewer, where a file and line number are - /// less meaningful. The format can vary by language. For example: - /// `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` - /// (Python). - #[prost(string, tag = "3")] - pub function: ::prost::alloc::string::String, -} -/// Describes a repository of logs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogBucket { - /// The resource name of the bucket. 
- /// For example: - /// "projects/my-project-id/locations/my-location/buckets/my-bucket-id The - /// supported locations are: - /// "global" - /// - /// For the location of `global` it is unspecified where logs are actually - /// stored. - /// Once a bucket has been created, the location can not be changed. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Describes this bucket. - #[prost(string, tag = "3")] - pub description: ::prost::alloc::string::String, - /// Output only. The creation timestamp of the bucket. This is not set for any of the - /// default buckets. - #[prost(message, optional, tag = "4")] - pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The last update timestamp of the bucket. - #[prost(message, optional, tag = "5")] - pub update_time: ::core::option::Option<::prost_types::Timestamp>, - /// Logs will be retained by default for this amount of time, after which they - /// will automatically be deleted. The minimum retention period is 1 day. - /// If this value is set to zero at bucket creation time, the default time of - /// 30 days will be used. - #[prost(int32, tag = "11")] - pub retention_days: i32, - /// Whether the bucket has been locked. - /// The retention period on a locked bucket may not be changed. - /// Locked buckets may only be deleted if they are empty. - #[prost(bool, tag = "9")] - pub locked: bool, - /// Output only. The bucket lifecycle state. - #[prost(enumeration = "LifecycleState", tag = "12")] - pub lifecycle_state: i32, -} -/// Describes a view over logs in a bucket. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogView { - /// The resource name of the view. - /// For example - /// "projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Describes this view. - #[prost(string, tag = "3")] - pub description: ::prost::alloc::string::String, - /// Output only. The creation timestamp of the view. - #[prost(message, optional, tag = "4")] - pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The last update timestamp of the view. - #[prost(message, optional, tag = "5")] - pub update_time: ::core::option::Option<::prost_types::Timestamp>, - /// Filter that restricts which log entries in a bucket are visible in this - /// view. Filters are restricted to be a logical AND of ==/!= of any of the - /// following: - /// originating project/folder/organization/billing account. - /// resource type - /// log id - /// Example: SOURCE("projects/myproject") AND resource.type = "gce_instance" - /// AND LOG_ID("stdout") - #[prost(string, tag = "7")] - pub filter: ::prost::alloc::string::String, -} -/// Describes a sink used to export log entries to one of the following -/// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a -/// Cloud Pub/Sub topic. A logs filter controls which log entries are exported. -/// The sink must be created within a project, organization, billing account, or -/// folder. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogSink { - /// Required. The client-assigned sink identifier, unique within the project. Example: - /// `"my-syslog-errors-to-pubsub"`. Sink identifiers are limited to 100 - /// characters and can include only the following characters: upper and - /// lower-case alphanumeric characters, underscores, hyphens, and periods. - /// First character has to be alphanumeric. 
- #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. The export destination: - /// - /// "storage.googleapis.com/\[GCS_BUCKET\]" - /// "bigquery.googleapis.com/projects/\[PROJECT_ID]/datasets/[DATASET\]" - /// "pubsub.googleapis.com/projects/\[PROJECT_ID]/topics/[TOPIC_ID\]" - /// - /// The sink's `writer_identity`, set when the sink is created, must - /// have permission to write to the destination or else the log - /// entries are not exported. For more information, see - /// [Exporting Logs with - /// Sinks](). - #[prost(string, tag = "3")] - pub destination: ::prost::alloc::string::String, - /// Optional. An [advanced logs - /// filter](). The - /// only exported log entries are those that are in the resource owning the - /// sink and that match the filter. For example: - /// - /// logName="projects/\[PROJECT_ID]/logs/[LOG_ID\]" AND severity>=ERROR - #[prost(string, tag = "5")] - pub filter: ::prost::alloc::string::String, - /// Optional. A description of this sink. - /// The maximum length of the description is 8000 characters. - #[prost(string, tag = "18")] - pub description: ::prost::alloc::string::String, - /// Optional. If set to True, then this sink is disabled and it does not - /// export any log entries. - #[prost(bool, tag = "19")] - pub disabled: bool, - /// Optional. Log entries that match any of the exclusion filters will not be exported. - /// If a log entry is matched by both `filter` and one of `exclusion_filters` - /// it will not be exported. - #[prost(message, repeated, tag = "16")] - pub exclusions: ::prost::alloc::vec::Vec, - /// Deprecated. This field is unused. - #[deprecated] - #[prost(enumeration = "log_sink::VersionFormat", tag = "6")] - pub output_version_format: i32, - /// Output only. An IAM identity—a service account or group—under which Logging - /// writes the exported log entries to the sink's destination. This field is - /// set by \[sinks.create][google.logging.v2.ConfigServiceV2.CreateSink\] and - /// \[sinks.update][google.logging.v2.ConfigServiceV2.UpdateSink\] based on the - /// value of `unique_writer_identity` in those methods. - /// - /// Until you grant this identity write-access to the destination, log entry - /// exports from this sink will fail. For more information, - /// see [Granting Access for a - /// Resource](). - /// Consult the destination service's documentation to determine the - /// appropriate IAM roles to assign to the identity. - #[prost(string, tag = "8")] - pub writer_identity: ::prost::alloc::string::String, - /// Optional. This field applies only to sinks owned by organizations and - /// folders. If the field is false, the default, only the logs owned by the - /// sink's parent resource are available for export. If the field is true, then - /// logs from all the projects, folders, and billing accounts contained in the - /// sink's parent resource are also available for export. Whether a particular - /// log entry from the children is exported depends on the sink's filter - /// expression. For example, if this field is true, then the filter - /// `resource.type=gce_instance` would export all Compute Engine VM instance - /// log entries from all projects in the sink's parent. To only export entries - /// from certain child projects, filter on the project part of the log name: - /// - /// logName:("projects/test-project1/" OR "projects/test-project2/") AND - /// resource.type=gce_instance - #[prost(bool, tag = "9")] - pub include_children: bool, - /// Output only. 
The creation timestamp of the sink. - /// - /// This field may not be present for older sinks. - #[prost(message, optional, tag = "13")] - pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The last update timestamp of the sink. - /// - /// This field may not be present for older sinks. - #[prost(message, optional, tag = "14")] - pub update_time: ::core::option::Option<::prost_types::Timestamp>, - /// Destination dependent options. - #[prost(oneof = "log_sink::Options", tags = "12")] - pub options: ::core::option::Option, -} -/// Nested message and enum types in `LogSink`. -pub mod log_sink { - /// Deprecated. This is unused. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] - #[repr(i32)] - pub enum VersionFormat { - /// An unspecified format version that will default to V2. - Unspecified = 0, - /// `LogEntry` version 2 format. - V2 = 1, - /// `LogEntry` version 1 format. - V1 = 2, - } - /// Destination dependent options. - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Options { - /// Optional. Options that affect sinks exporting data to BigQuery. - #[prost(message, tag = "12")] - BigqueryOptions(super::BigQueryOptions), - } -} -/// Options that change functionality of a sink exporting data to BigQuery. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BigQueryOptions { - /// Optional. Whether to use [BigQuery's partition - /// tables](). By - /// default, Logging creates dated tables based on the log entries' timestamps, - /// e.g. syslog_20170523. With partitioned tables the date suffix is no longer - /// present and [special query - /// syntax]() - /// has to be used instead. In both cases, tables are sharded based on UTC - /// timezone. - #[prost(bool, tag = "1")] - pub use_partitioned_tables: bool, - /// Output only. True if new timestamp column based partitioning is in use, - /// false if legacy ingestion-time partitioning is in use. - /// All new sinks will have this field set true and will use timestamp column - /// based partitioning. If use_partitioned_tables is false, this value has no - /// meaning and will be false. Legacy sinks using partitioned tables will have - /// this field set to false. - #[prost(bool, tag = "3")] - pub uses_timestamp_column_partitioning: bool, -} -/// The parameters to `ListBuckets`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListBucketsRequest { - /// Required. The parent resource whose buckets are to be listed: - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID\]" - /// "organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID\]" - /// "folders/\[FOLDER_ID]/locations/[LOCATION_ID\]" - /// - /// Note: The locations portion of the resource must be specified, but - /// supplying the character `-` in place of \[LOCATION_ID\] will return all - /// buckets. - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Optional. If present, then retrieve the next batch of results from the - /// preceding call to this method. `pageToken` must be the value of - /// `nextPageToken` from the previous response. The values of other method - /// parameters should be identical to those in the previous call. - #[prost(string, tag = "2")] - pub page_token: ::prost::alloc::string::String, - /// Optional. The maximum number of results to return from this request. - /// Non-positive values are ignored. 
The presence of `nextPageToken` in the - /// response indicates that more results might be available. - #[prost(int32, tag = "3")] - pub page_size: i32, -} -/// The response from ListBuckets. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListBucketsResponse { - /// A list of buckets. - #[prost(message, repeated, tag = "1")] - pub buckets: ::prost::alloc::vec::Vec, - /// If there might be more results than appear in this response, then - /// `nextPageToken` is included. To get the next set of results, call the same - /// method again using the value of `nextPageToken` as `pageToken`. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} -/// The parameters to `CreateBucket`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateBucketRequest { - /// Required. The resource in which to create the bucket: - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID\]" - /// - /// Example: `"projects/my-logging-project/locations/global"` - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Required. A client-assigned identifier such as `"my-bucket"`. Identifiers are - /// limited to 100 characters and can include only letters, digits, - /// underscores, hyphens, and periods. - #[prost(string, tag = "2")] - pub bucket_id: ::prost::alloc::string::String, - /// Required. The new bucket. The region specified in the new bucket must be compliant - /// with any Location Restriction Org Policy. The name field in the bucket is - /// ignored. - #[prost(message, optional, tag = "3")] - pub bucket: ::core::option::Option, -} -/// The parameters to `UpdateBucket`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UpdateBucketRequest { - /// Required. The full resource name of the bucket to update. - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// - /// Example: - /// `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. Also - /// requires permission "resourcemanager.projects.updateLiens" to set the - /// locked property - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. The updated bucket. - #[prost(message, optional, tag = "2")] - pub bucket: ::core::option::Option, - /// Required. Field mask that specifies the fields in `bucket` that need an update. A - /// bucket field will be overwritten if, and only if, it is in the update - /// mask. `name` and output only fields cannot be updated. - /// - /// For a detailed `FieldMask` definition, see - /// - /// - /// Example: `updateMask=retention_days`. - #[prost(message, optional, tag = "4")] - pub update_mask: ::core::option::Option<::prost_types::FieldMask>, -} -/// The parameters to `GetBucket`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetBucketRequest { - /// Required. 
The resource name of the bucket: - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// - /// Example: - /// `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to `DeleteBucket`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeleteBucketRequest { - /// Required. The full resource name of the bucket to delete. - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// - /// Example: - /// `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to `UndeleteBucket`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UndeleteBucketRequest { - /// Required. The full resource name of the bucket to undelete. - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// "folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// - /// Example: - /// `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to `ListViews`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListViewsRequest { - /// Required. The bucket whose views are to be listed: - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Optional. If present, then retrieve the next batch of results from the - /// preceding call to this method. `pageToken` must be the value of - /// `nextPageToken` from the previous response. The values of other method - /// parameters should be identical to those in the previous call. - #[prost(string, tag = "2")] - pub page_token: ::prost::alloc::string::String, - /// Optional. The maximum number of results to return from this request. - /// Non-positive values are ignored. The presence of `nextPageToken` in the - /// response indicates that more results might be available. - #[prost(int32, tag = "3")] - pub page_size: i32, -} -/// The response from ListViews. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListViewsResponse { - /// A list of views. - #[prost(message, repeated, tag = "1")] - pub views: ::prost::alloc::vec::Vec, - /// If there might be more results than appear in this response, then - /// `nextPageToken` is included. To get the next set of results, call the same - /// method again using the value of `nextPageToken` as `pageToken`. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} -/// The parameters to `CreateView`. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateViewRequest { - /// Required. The bucket in which to create the view - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID\]" - /// - /// Example: - /// `"projects/my-logging-project/locations/my-location/buckets/my-bucket"` - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Required. The id to use for this view. - #[prost(string, tag = "2")] - pub view_id: ::prost::alloc::string::String, - /// Required. The new view. - #[prost(message, optional, tag = "3")] - pub view: ::core::option::Option, -} -/// The parameters to `UpdateView`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UpdateViewRequest { - /// Required. The full resource name of the view to update - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]" - /// - /// Example: - /// `"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. The updated view. - #[prost(message, optional, tag = "2")] - pub view: ::core::option::Option, - /// Optional. Field mask that specifies the fields in `view` that need - /// an update. A field will be overwritten if, and only if, it is - /// in the update mask. `name` and output only fields cannot be updated. - /// - /// For a detailed `FieldMask` definition, see - /// - /// - /// Example: `updateMask=filter`. - #[prost(message, optional, tag = "4")] - pub update_mask: ::core::option::Option<::prost_types::FieldMask>, -} -/// The parameters to `GetView`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetViewRequest { - /// Required. The resource name of the policy: - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]" - /// - /// Example: - /// `"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to `DeleteView`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeleteViewRequest { - /// Required. The full resource name of the view to delete: - /// - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]" - /// - /// Example: - /// `"projects/my-project-id/locations/my-location/buckets/my-bucket-id/views/my-view-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to `ListSinks`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListSinksRequest { - /// Required. The parent resource whose sinks are to be listed: - /// - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Optional. If present, then retrieve the next batch of results from the - /// preceding call to this method. `pageToken` must be the value of - /// `nextPageToken` from the previous response. The values of other method - /// parameters should be identical to those in the previous call. - #[prost(string, tag = "2")] - pub page_token: ::prost::alloc::string::String, - /// Optional. The maximum number of results to return from this request. - /// Non-positive values are ignored. The presence of `nextPageToken` in the - /// response indicates that more results might be available. 
- #[prost(int32, tag = "3")] - pub page_size: i32, -} -/// Result returned from `ListSinks`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListSinksResponse { - /// A list of sinks. - #[prost(message, repeated, tag = "1")] - pub sinks: ::prost::alloc::vec::Vec, - /// If there might be more results than appear in this response, then - /// `nextPageToken` is included. To get the next set of results, call the same - /// method again using the value of `nextPageToken` as `pageToken`. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} -/// The parameters to `GetSink`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetSinkRequest { - /// Required. The resource name of the sink: - /// - /// "projects/\[PROJECT_ID]/sinks/[SINK_ID\]" - /// "organizations/\[ORGANIZATION_ID]/sinks/[SINK_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/sinks/[SINK_ID\]" - /// "folders/\[FOLDER_ID]/sinks/[SINK_ID\]" - /// - /// Example: `"projects/my-project-id/sinks/my-sink-id"`. - #[prost(string, tag = "1")] - pub sink_name: ::prost::alloc::string::String, -} -/// The parameters to `CreateSink`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateSinkRequest { - /// Required. The resource in which to create the sink: - /// - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" - /// - /// Examples: `"projects/my-logging-project"`, `"organizations/123456789"`. - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Required. The new sink, whose `name` parameter is a sink identifier that - /// is not already in use. - #[prost(message, optional, tag = "2")] - pub sink: ::core::option::Option, - /// Optional. Determines the kind of IAM identity returned as `writer_identity` - /// in the new sink. If this value is omitted or set to false, and if the - /// sink's parent is a project, then the value returned as `writer_identity` is - /// the same group or service account used by Logging before the addition of - /// writer identities to this API. The sink's destination must be in the same - /// project as the sink itself. - /// - /// If this field is set to true, or if the sink is owned by a non-project - /// resource such as an organization, then the value of `writer_identity` will - /// be a unique service account used only for exports from the new sink. For - /// more information, see `writer_identity` in \[LogSink][google.logging.v2.LogSink\]. - #[prost(bool, tag = "3")] - pub unique_writer_identity: bool, -} -/// The parameters to `UpdateSink`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UpdateSinkRequest { - /// Required. The full resource name of the sink to update, including the parent - /// resource and the sink identifier: - /// - /// "projects/\[PROJECT_ID]/sinks/[SINK_ID\]" - /// "organizations/\[ORGANIZATION_ID]/sinks/[SINK_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/sinks/[SINK_ID\]" - /// "folders/\[FOLDER_ID]/sinks/[SINK_ID\]" - /// - /// Example: `"projects/my-project-id/sinks/my-sink-id"`. - #[prost(string, tag = "1")] - pub sink_name: ::prost::alloc::string::String, - /// Required. The updated sink, whose name is the same identifier that appears as part - /// of `sink_name`. - #[prost(message, optional, tag = "2")] - pub sink: ::core::option::Option, - /// Optional. See \[sinks.create][google.logging.v2.ConfigServiceV2.CreateSink\] - /// for a description of this field. 
When updating a sink, the effect of this - /// field on the value of `writer_identity` in the updated sink depends on both - /// the old and new values of this field: - /// - /// + If the old and new values of this field are both false or both true, - /// then there is no change to the sink's `writer_identity`. - /// + If the old value is false and the new value is true, then - /// `writer_identity` is changed to a unique service account. - /// + It is an error if the old value is true and the new value is - /// set to false or defaulted to false. - #[prost(bool, tag = "3")] - pub unique_writer_identity: bool, - /// Optional. Field mask that specifies the fields in `sink` that need - /// an update. A sink field will be overwritten if, and only if, it is - /// in the update mask. `name` and output only fields cannot be updated. - /// - /// An empty updateMask is temporarily treated as using the following mask - /// for backwards compatibility purposes: - /// destination,filter,includeChildren - /// At some point in the future, behavior will be removed and specifying an - /// empty updateMask will be an error. - /// - /// For a detailed `FieldMask` definition, see - /// - /// - /// Example: `updateMask=filter`. - #[prost(message, optional, tag = "4")] - pub update_mask: ::core::option::Option<::prost_types::FieldMask>, -} -/// The parameters to `DeleteSink`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeleteSinkRequest { - /// Required. The full resource name of the sink to delete, including the parent - /// resource and the sink identifier: - /// - /// "projects/\[PROJECT_ID]/sinks/[SINK_ID\]" - /// "organizations/\[ORGANIZATION_ID]/sinks/[SINK_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/sinks/[SINK_ID\]" - /// "folders/\[FOLDER_ID]/sinks/[SINK_ID\]" - /// - /// Example: `"projects/my-project-id/sinks/my-sink-id"`. - #[prost(string, tag = "1")] - pub sink_name: ::prost::alloc::string::String, -} -/// Specifies a set of log entries that are not to be stored in -/// Logging. If your GCP resource receives a large volume of logs, you can -/// use exclusions to reduce your chargeable logs. Exclusions are -/// processed after log sinks, so you can export log entries before they are -/// excluded. Note that organization-level and folder-level exclusions don't -/// apply to child resources, and that you can't exclude audit log entries. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LogExclusion { - /// Required. A client-assigned identifier, such as `"load-balancer-exclusion"`. - /// Identifiers are limited to 100 characters and can include only letters, - /// digits, underscores, hyphens, and periods. First character has to be - /// alphanumeric. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Optional. A description of this exclusion. - #[prost(string, tag = "2")] - pub description: ::prost::alloc::string::String, - /// Required. An [advanced logs - /// filter]() that - /// matches the log entries to be excluded. By using the [sample - /// function](), - /// you can exclude less than 100% of the matching log entries. - /// For example, the following query matches 99% of low-severity log - /// entries from Google Cloud Storage buckets: - /// - /// `"resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)"` - #[prost(string, tag = "3")] - pub filter: ::prost::alloc::string::String, - /// Optional. If set to True, then this exclusion is disabled and it does not - /// exclude any log entries. You can \[update an - /// exclusion][google.logging.v2.ConfigServiceV2.UpdateExclusion\] to change the - /// value of this field. - #[prost(bool, tag = "4")] - pub disabled: bool, - /// Output only. The creation timestamp of the exclusion. - /// - /// This field may not be present for older exclusions. - #[prost(message, optional, tag = "5")] - pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The last update timestamp of the exclusion. - /// - /// This field may not be present for older exclusions.
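A minimal sketch of the `UpdateSink` semantics documented above: the field mask restricts the overwrite to `filter`, and keeping `unique_writer_identity` equal to its old value leaves `writer_identity` untouched. The `google::logging::v2` module path, endpoint, and auth handling are assumptions:

```rust
use google::logging::v2::{config_service_v2_client::ConfigServiceV2Client, LogSink, UpdateSinkRequest};
use prost_types::FieldMask;

async fn narrow_sink_filter(sink_name: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = ConfigServiceV2Client::connect("https://logging.googleapis.com").await?;
    let updated = client
        .update_sink(UpdateSinkRequest {
            sink_name: sink_name.to_string(),
            sink: Some(LogSink {
                filter: "severity>=ERROR".to_string(),
                ..Default::default()
            }),
            // Old and new values equal (false): `writer_identity` is unchanged.
            unique_writer_identity: false,
            // Only `filter` is overwritten; all other sink fields are kept.
            update_mask: Some(FieldMask {
                paths: vec!["filter".to_string()],
            }),
        })
        .await?
        .into_inner();
    println!("updated sink {}", updated.name);
    Ok(())
}
```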
- #[prost(message, optional, tag = "6")] - pub update_time: ::core::option::Option<::prost_types::Timestamp>, -} -/// The parameters to `ListExclusions`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListExclusionsRequest { - /// Required. The parent resource whose exclusions are to be listed. - /// - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Optional. If present, then retrieve the next batch of results from the - /// preceding call to this method. `pageToken` must be the value of - /// `nextPageToken` from the previous response. The values of other method - /// parameters should be identical to those in the previous call. - #[prost(string, tag = "2")] - pub page_token: ::prost::alloc::string::String, - /// Optional. The maximum number of results to return from this request. - /// Non-positive values are ignored. The presence of `nextPageToken` in the - /// response indicates that more results might be available. - #[prost(int32, tag = "3")] - pub page_size: i32, -} -/// Result returned from `ListExclusions`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListExclusionsResponse { - /// A list of exclusions. - #[prost(message, repeated, tag = "1")] - pub exclusions: ::prost::alloc::vec::Vec, - /// If there might be more results than appear in this response, then - /// `nextPageToken` is included. To get the next set of results, call the same - /// method again using the value of `nextPageToken` as `pageToken`. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} -/// The parameters to `GetExclusion`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetExclusionRequest { - /// Required. The resource name of an existing exclusion: - /// - /// "projects/\[PROJECT_ID]/exclusions/[EXCLUSION_ID\]" - /// "organizations/\[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID\]" - /// "folders/\[FOLDER_ID]/exclusions/[EXCLUSION_ID\]" - /// - /// Example: `"projects/my-project-id/exclusions/my-exclusion-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to `CreateExclusion`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateExclusionRequest { - /// Required. The parent resource in which to create the exclusion: - /// - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" - /// - /// Examples: `"projects/my-logging-project"`, `"organizations/123456789"`. - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Required. The new exclusion, whose `name` parameter is an exclusion name - /// that is not already used in the parent resource. - #[prost(message, optional, tag = "2")] - pub exclusion: ::core::option::Option, -} -/// The parameters to `UpdateExclusion`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UpdateExclusionRequest { - /// Required. 
The resource name of the exclusion to update: - /// - /// "projects/\[PROJECT_ID]/exclusions/[EXCLUSION_ID\]" - /// "organizations/\[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID\]" - /// "folders/\[FOLDER_ID]/exclusions/[EXCLUSION_ID\]" - /// - /// Example: `"projects/my-project-id/exclusions/my-exclusion-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. New values for the existing exclusion. Only the fields specified in - /// `update_mask` are relevant. - #[prost(message, optional, tag = "2")] - pub exclusion: ::core::option::Option, - /// Required. A non-empty list of fields to change in the existing exclusion. New values - /// for the fields are taken from the corresponding fields in the - /// \[LogExclusion][google.logging.v2.LogExclusion\] included in this request. Fields not mentioned in - /// `update_mask` are not changed and are ignored in the request. - /// - /// For example, to change the filter and description of an exclusion, - /// specify an `update_mask` of `"filter,description"`. - #[prost(message, optional, tag = "3")] - pub update_mask: ::core::option::Option<::prost_types::FieldMask>, -} -/// The parameters to `DeleteExclusion`. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeleteExclusionRequest { - /// Required. The resource name of an existing exclusion to delete: - /// - /// "projects/\[PROJECT_ID]/exclusions/[EXCLUSION_ID\]" - /// "organizations/\[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID\]" - /// "folders/\[FOLDER_ID]/exclusions/[EXCLUSION_ID\]" - /// - /// Example: `"projects/my-project-id/exclusions/my-exclusion-id"`. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to -/// \[GetCmekSettings][google.logging.v2.ConfigServiceV2.GetCmekSettings\]. -/// -/// See [Enabling CMEK for Logs -/// Router]() for -/// more information. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetCmekSettingsRequest { - /// Required. The resource for which to retrieve CMEK settings. - /// - /// "projects/\[PROJECT_ID\]/cmekSettings" - /// "organizations/\[ORGANIZATION_ID\]/cmekSettings" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]/cmekSettings" - /// "folders/\[FOLDER_ID\]/cmekSettings" - /// - /// Example: `"organizations/12345/cmekSettings"`. - /// - /// Note: CMEK for the Logs Router can currently only be configured for GCP - /// organizations. Once configured, it applies to all projects and folders in - /// the GCP organization. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// The parameters to -/// \[UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings\]. -/// -/// See [Enabling CMEK for Logs -/// Router]() for -/// more information. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UpdateCmekSettingsRequest { - /// Required. The resource name for the CMEK settings to update. - /// - /// "projects/\[PROJECT_ID\]/cmekSettings" - /// "organizations/\[ORGANIZATION_ID\]/cmekSettings" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]/cmekSettings" - /// "folders/\[FOLDER_ID\]/cmekSettings" - /// - /// Example: `"organizations/12345/cmekSettings"`. - /// - /// Note: CMEK for the Logs Router can currently only be configured for GCP - /// organizations. Once configured, it applies to all projects and folders in - /// the GCP organization. 
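Tying the exclusion messages above together, a hypothetical `CreateExclusion` call that installs the 99%-sampling filter from the `LogExclusion` example; the parent, module path, and endpoint are placeholders:

```rust
use google::logging::v2::{config_service_v2_client::ConfigServiceV2Client, CreateExclusionRequest, LogExclusion};

async fn exclude_noisy_logs(parent: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = ConfigServiceV2Client::connect("https://logging.googleapis.com").await?;
    let created = client
        .create_exclusion(CreateExclusionRequest {
            parent: parent.to_string(),
            exclusion: Some(LogExclusion {
                name: "load-balancer-exclusion".to_string(),
                description: "drop 99% of low-severity GCS bucket entries".to_string(),
                filter: "resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)"
                    .to_string(),
                disabled: false,
                ..Default::default()
            }),
        })
        .await?
        .into_inner();
    println!("created exclusion {}", created.name);
    Ok(())
}
```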
- #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. The CMEK settings to update. - /// - /// See [Enabling CMEK for Logs - /// Router]() - /// for more information. - #[prost(message, optional, tag = "2")] - pub cmek_settings: ::core::option::Option, - /// Optional. Field mask identifying which fields from `cmek_settings` should - /// be updated. A field will be overwritten if and only if it is in the update - /// mask. Output only fields cannot be updated. - /// - /// See \[FieldMask][google.protobuf.FieldMask\] for more information. - /// - /// Example: `"updateMask=kmsKeyName"` - #[prost(message, optional, tag = "3")] - pub update_mask: ::core::option::Option<::prost_types::FieldMask>, -} -/// Describes the customer-managed encryption key (CMEK) settings associated with -/// a project, folder, organization, billing account, or flexible resource. -/// -/// Note: CMEK for the Logs Router can currently only be configured for GCP -/// organizations. Once configured, it applies to all projects and folders in the -/// GCP organization. -/// -/// See [Enabling CMEK for Logs -/// Router]() for -/// more information. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CmekSettings { - /// Output only. The resource name of the CMEK settings. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The resource name for the configured Cloud KMS key. - /// - /// KMS key name format: - /// "projects/\[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY\]" - /// - /// For example: - /// `"projects/my-project-id/locations/my-region/keyRings/key-ring-name/cryptoKeys/key-name"` - /// - /// - /// - /// To enable CMEK for the Logs Router, set this field to a valid - /// `kms_key_name` for which the associated service account has the required - /// `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key. - /// - /// The Cloud KMS key used by the Log Router can be updated by changing the - /// `kms_key_name` to a new valid key name. Encryption operations that are in - /// progress will be completed with the key that was in use when they started. - /// Decryption operations will be completed using the key that was used at the - /// time of encryption unless access to that key has been revoked. - /// - /// To disable CMEK for the Logs Router, set this field to an empty string. - /// - /// See [Enabling CMEK for Logs - /// Router]() - /// for more information. - #[prost(string, tag = "2")] - pub kms_key_name: ::prost::alloc::string::String, - /// Output only. The service account that will be used by the Logs Router to access your - /// Cloud KMS key. - /// - /// Before enabling CMEK for Logs Router, you must first assign the role - /// `roles/cloudkms.cryptoKeyEncrypterDecrypter` to the service account that - /// the Logs Router will use to access your Cloud KMS key. Use - /// \[GetCmekSettings][google.logging.v2.ConfigServiceV2.GetCmekSettings\] to - /// obtain the service account ID. - /// - /// See [Enabling CMEK for Logs - /// Router]() - /// for more information. - #[prost(string, tag = "3")] - pub service_account_id: ::prost::alloc::string::String, -} -/// LogBucket lifecycle states. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum LifecycleState { - /// Unspecified state. This is only used/useful for distinguishing - /// unset values. - Unspecified = 0, - /// The normal and active state. 
- Active = 1, - /// The bucket has been marked for deletion by the user. - DeleteRequested = 2, -} -#[doc = r" Generated client implementations."] -pub mod config_service_v2_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - #[doc = " Service for configuring sinks used to route log entries."] - #[derive(Debug, Clone)] - pub struct ConfigServiceV2Client { - inner: tonic::client::Grpc, - } - impl ConfigServiceV2Client { - #[doc = r" Attempt to create a new client by connecting to a given endpoint."] - pub async fn connect(dst: D) -> Result - where - D: std::convert::TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ConfigServiceV2Client - where - T: tonic::client::GrpcService, - T::ResponseBody: Body + Send + 'static, - T::Error: Into, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ConfigServiceV2Client> - where - F: tonic::service::Interceptor, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - >>::Error: - Into + Send + Sync, - { - ConfigServiceV2Client::new(InterceptedService::new(inner, interceptor)) - } - #[doc = r" Compress requests with `gzip`."] - #[doc = r""] - #[doc = r" This requires the server to support it otherwise it might respond with an"] - #[doc = r" error."] - pub fn send_gzip(mut self) -> Self { - self.inner = self.inner.send_gzip(); - self - } - #[doc = r" Enable decompressing responses with `gzip`."] - pub fn accept_gzip(mut self) -> Self { - self.inner = self.inner.accept_gzip(); - self - } - #[doc = " Lists buckets."] - pub async fn list_buckets( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/ListBuckets", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Gets a bucket."] - pub async fn get_bucket( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/GetBucket", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Creates a bucket that can be used to store log entries. Once a bucket has"] - #[doc = " been created, the region cannot be changed."] - pub async fn create_bucket( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/CreateBucket", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Updates a bucket. 
This method replaces the following fields in the"] - #[doc = " existing bucket with values from the new bucket: `retention_period`"] - #[doc = ""] - #[doc = " If the retention period is decreased and the bucket is locked,"] - #[doc = " FAILED_PRECONDITION will be returned."] - #[doc = ""] - #[doc = " If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION"] - #[doc = " will be returned."] - #[doc = ""] - #[doc = " A buckets region may not be modified after it is created."] - pub async fn update_bucket( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/UpdateBucket", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Deletes a bucket."] - #[doc = " Moves the bucket to the DELETE_REQUESTED state. After 7 days, the"] - #[doc = " bucket will be purged and all logs in the bucket will be permanently"] - #[doc = " deleted."] - pub async fn delete_bucket( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/DeleteBucket", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Undeletes a bucket. A bucket that has been deleted may be undeleted within"] - #[doc = " the grace period of 7 days."] - pub async fn undelete_bucket( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/UndeleteBucket", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Lists views on a bucket."] - pub async fn list_views( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/ListViews", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Gets a view."] - pub async fn get_view( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = - http::uri::PathAndQuery::from_static("/google.logging.v2.ConfigServiceV2/GetView"); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Creates a view over logs in a bucket. 
A bucket may contain a maximum of"] - #[doc = " 50 views."] - pub async fn create_view( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/CreateView", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Updates a view. This method replaces the following fields in the existing"] - #[doc = " view with values from the new view: `filter`."] - pub async fn update_view( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/UpdateView", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Deletes a view from a bucket."] - pub async fn delete_view( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/DeleteView", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Lists sinks."] - pub async fn list_sinks( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/ListSinks", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Gets a sink."] - pub async fn get_sink( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = - http::uri::PathAndQuery::from_static("/google.logging.v2.ConfigServiceV2/GetSink"); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Creates a sink that exports specified log entries to a destination. The"] - #[doc = " export of newly-ingested log entries begins immediately, unless the sink's"] - #[doc = " `writer_identity` is not permitted to write to the destination. A sink can"] - #[doc = " export log entries only from the resource owning the sink."] - pub async fn create_sink( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/CreateSink", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Updates a sink. 
This method replaces the following fields in the existing"] - #[doc = " sink with values from the new sink: `destination`, and `filter`."] - #[doc = ""] - #[doc = " The updated sink might also have a new `writer_identity`; see the"] - #[doc = " `unique_writer_identity` field."] - pub async fn update_sink( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/UpdateSink", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Deletes a sink. If the sink has a unique `writer_identity`, then that"] - #[doc = " service account is also deleted."] - pub async fn delete_sink( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/DeleteSink", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Lists all the exclusions in a parent resource."] - pub async fn list_exclusions( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/ListExclusions", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Gets the description of an exclusion."] - pub async fn get_exclusion( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/GetExclusion", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Creates a new exclusion in a specified parent resource."] - #[doc = " Only log entries belonging to that resource can be excluded."] - #[doc = " You can have up to 10 exclusions in a resource."] - pub async fn create_exclusion( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/CreateExclusion", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Changes one or more properties of an existing exclusion."] - pub async fn update_exclusion( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = 
http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/UpdateExclusion", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Deletes an exclusion."] - pub async fn delete_exclusion( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/DeleteExclusion", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Gets the Logs Router CMEK settings for the given resource."] - #[doc = ""] - #[doc = " Note: CMEK for the Logs Router can currently only be configured for GCP"] - #[doc = " organizations. Once configured, it applies to all projects and folders in"] - #[doc = " the GCP organization."] - #[doc = ""] - #[doc = " See [Enabling CMEK for Logs"] - #[doc = " Router](https://cloud.google.com/logging/docs/routing/managed-encryption)"] - #[doc = " for more information."] - pub async fn get_cmek_settings( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/GetCmekSettings", - ); - self.inner.unary(request.into_request(), path, codec).await - } - #[doc = " Updates the Logs Router CMEK settings for the given resource."] - #[doc = ""] - #[doc = " Note: CMEK for the Logs Router can currently only be configured for GCP"] - #[doc = " organizations. Once configured, it applies to all projects and folders in"] - #[doc = " the GCP organization."] - #[doc = ""] - #[doc = " [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings]"] - #[doc = " will fail if 1) `kms_key_name` is invalid, or 2) the associated service"] - #[doc = " account does not have the required"] - #[doc = " `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or"] - #[doc = " 3) access to the key is disabled."] - #[doc = ""] - #[doc = " See [Enabling CMEK for Logs"] - #[doc = " Router](https://cloud.google.com/logging/docs/routing/managed-encryption)"] - #[doc = " for more information."] - pub async fn update_cmek_settings( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.logging.v2.ConfigServiceV2/UpdateCmekSettings", - ); - self.inner.unary(request.into_request(), path, codec).await - } + /// + /// `\[LOG_ID\]` must be less than 512 characters long and can only include the + /// following characters: upper and lower case alphanumeric characters, + /// forward-slash, underscore, hyphen, and period. + /// + /// For backward compatibility, if `log_name` begins with a forward-slash, such + /// as `/projects/...`, then the log entry is ingested as usual, but the + /// forward-slash is removed. 
Listing the log entry will not show the leading + /// slash and filtering for a log name with a leading slash will never return + /// any results. + #[prost(string, tag = "12")] + pub log_name: ::prost::alloc::string::String, + /// Required. The monitored resource that produced this log entry. + /// + /// Example: a log entry that reports a database error would be associated with + /// the monitored resource designating the particular database that reported + /// the error. + #[prost(message, optional, tag = "8")] + pub resource: ::core::option::Option, + /// Optional. The time the event described by the log entry occurred. This time is used + /// to compute the log entry's age and to enforce the logs retention period. + /// If this field is omitted in a new log entry, then Logging assigns it the + /// current time. Timestamps have nanosecond accuracy, but trailing zeros in + /// the fractional seconds might be omitted when the timestamp is displayed. + /// + /// Incoming log entries must have timestamps that don't exceed the + /// [logs retention + /// period]() in + /// the past, and that don't exceed 24 hours in the future. Log entries outside + /// those time boundaries aren't ingested by Logging. + #[prost(message, optional, tag = "9")] + pub timestamp: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The time the log entry was received by Logging. + #[prost(message, optional, tag = "24")] + pub receive_timestamp: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. The severity of the log entry. The default value is `LogSeverity.DEFAULT`. + #[prost(enumeration = "super::r#type::LogSeverity", tag = "10")] + pub severity: i32, + /// Optional. A unique identifier for the log entry. If you provide a value, then + /// Logging considers other log entries in the same project, with the same + /// `timestamp`, and with the same `insert_id` to be duplicates which are + /// removed in a single query result. However, there are no guarantees of + /// de-duplication in the export of logs. + /// + /// If the `insert_id` is omitted when writing a log entry, the Logging API + /// assigns its own unique identifier in this field. + /// + /// In queries, the `insert_id` is also used to order log entries that have + /// the same `log_name` and `timestamp` values. + #[prost(string, tag = "4")] + pub insert_id: ::prost::alloc::string::String, + /// Optional. Information about the HTTP request associated with this log entry, if + /// applicable. + #[prost(message, optional, tag = "7")] + pub http_request: ::core::option::Option, + /// Optional. A map of key, value pairs that provides additional information about the + /// log entry. The labels can be user-defined or system-defined. + /// + /// User-defined labels are arbitrary key, value pairs that you can use to + /// classify logs. + /// + /// System-defined labels are defined by GCP services for platform logs. + /// They have two components - a service namespace component and the + /// attribute name. For example: `compute.googleapis.com/resource_name`. + /// + /// Cloud Logging truncates label keys that exceed 512 B and label + /// values that exceed 64 KB upon their associated log entry being + /// written. The truncation is indicated by an ellipsis at the + /// end of the character string. + #[prost(map = "string, string", tag = "11")] + pub labels: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Optional. 
Information about an operation associated with the log entry, if + /// applicable. + #[prost(message, optional, tag = "15")] + pub operation: ::core::option::Option, + /// Optional. Resource name of the trace associated with the log entry, if any. If it + /// contains a relative resource name, the name is assumed to be relative to + /// `//tracing.googleapis.com`. Example: + /// `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824` + #[prost(string, tag = "22")] + pub trace: ::prost::alloc::string::String, + /// Optional. The span ID within the trace associated with the log entry. + /// + /// For Trace spans, this is the same format that the Trace API v2 uses: a + /// 16-character hexadecimal encoding of an 8-byte array, such as + /// `000000000000004a`. + #[prost(string, tag = "27")] + pub span_id: ::prost::alloc::string::String, + /// Optional. The sampling decision of the trace associated with the log entry. + /// + /// True means that the trace resource name in the `trace` field was sampled + /// for storage in a trace backend. False means that the trace was not sampled + /// for storage when this log entry was written, or the sampling decision was + /// unknown at the time. A non-sampled `trace` value is still useful as a + /// request correlation identifier. The default is False. + #[prost(bool, tag = "30")] + pub trace_sampled: bool, + /// Optional. Source code location information associated with the log entry, if any. + #[prost(message, optional, tag = "23")] + pub source_location: ::core::option::Option, + /// Optional. Information indicating this LogEntry is part of a sequence of multiple log + /// entries split from a single LogEntry. + #[prost(message, optional, tag = "35")] + pub split: ::core::option::Option, + /// The log entry payload, which can be one of multiple types. + #[prost(oneof = "log_entry::Payload", tags = "2, 3, 6")] + pub payload: ::core::option::Option, +} +/// Nested message and enum types in `LogEntry`. +pub mod log_entry { + /// The log entry payload, which can be one of multiple types. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Payload { + /// The log entry payload, represented as a protocol buffer. Some Google + /// Cloud Platform services use this field for their log entry payloads. + /// + /// The following protocol buffer types are supported; user-defined types + /// are not supported: + /// + /// "type.googleapis.com/google.cloud.audit.AuditLog" + /// "type.googleapis.com/google.appengine.logging.v1.RequestLog" + #[prost(message, tag = "2")] + ProtoPayload(::prost_types::Any), + /// The log entry payload, represented as a Unicode string (UTF-8). + #[prost(string, tag = "3")] + TextPayload(::prost::alloc::string::String), + /// The log entry payload, represented as a structure that is + /// expressed as a JSON object. + #[prost(message, tag = "6")] + JsonPayload(::prost_types::Struct), } } +/// Additional information about a potentially long-running operation with which +/// a log entry is associated. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LogEntryOperation { + /// Optional. An arbitrary operation identifier. Log entries with the same + /// identifier are assumed to be part of the same operation. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Optional. An arbitrary producer identifier. The combination of `id` and + /// `producer` must be globally unique. Examples for `producer`: + /// `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`. 
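A sketch of populating a `LogEntry` by hand using the fields documented above: exactly one `Payload` variant is set, and omitting `timestamp` lets Logging assign the current time. The module paths (including `google::logging::r#type::LogSeverity`) are assumptions about how this generated file is mounted:

```rust
use google::logging::r#type::LogSeverity;
use google::logging::v2::{log_entry::Payload, LogEntry};

fn make_entry(project_id: &str, trace_id: &str) -> LogEntry {
    LogEntry {
        log_name: format!("projects/{}/logs/my-app", project_id),
        severity: LogSeverity::Error as i32,
        // Relative resource name, resolved against `//tracing.googleapis.com`.
        trace: format!("projects/{}/traces/{}", project_id, trace_id),
        // 16-character hex encoding of an 8-byte span id.
        span_id: "000000000000004a".to_string(),
        trace_sampled: true,
        // Exactly one payload variant may be set.
        payload: Some(Payload::TextPayload("request failed".to_string())),
        // `timestamp` omitted: Logging assigns the receive time.
        ..Default::default()
    }
}
```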
+ #[prost(string, tag = "2")] + pub producer: ::prost::alloc::string::String, + /// Optional. Set this to True if this is the first log entry in the operation. + #[prost(bool, tag = "3")] + pub first: bool, + /// Optional. Set this to True if this is the last log entry in the operation. + #[prost(bool, tag = "4")] + pub last: bool, +} +/// Additional information about the source code location that produced the log +/// entry. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LogEntrySourceLocation { + /// Optional. Source file name. Depending on the runtime environment, this + /// might be a simple name or a fully-qualified name. + #[prost(string, tag = "1")] + pub file: ::prost::alloc::string::String, + /// Optional. Line within the source file. 1-based; 0 indicates no line number + /// available. + #[prost(int64, tag = "2")] + pub line: i64, + /// Optional. Human-readable name of the function or method being invoked, with + /// optional context such as the class or package name. This information may be + /// used in contexts such as the logs viewer, where a file and line number are + /// less meaningful. The format can vary by language. For example: + /// `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` + /// (Python). + #[prost(string, tag = "3")] + pub function: ::prost::alloc::string::String, +} +/// Additional information used to correlate multiple log entries. Used when a +/// single LogEntry would exceed the Google Cloud Logging size limit and is +/// split across multiple log entries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LogSplit { + /// A globally unique identifier for all log entries in a sequence of split log + /// entries. All log entries with the same |LogSplit.uid| are assumed to be + /// part of the same sequence of split log entries. + #[prost(string, tag = "1")] + pub uid: ::prost::alloc::string::String, + /// The index of this LogEntry in the sequence of split log entries. Log + /// entries are given |index| values 0, 1, ..., n-1 for a sequence of n log + /// entries. + #[prost(int32, tag = "2")] + pub index: i32, + /// The total number of log entries that the original LogEntry was split into. + #[prost(int32, tag = "3")] + pub total_splits: i32, +} /// The parameters to DeleteLog. #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteLogRequest { /// Required. The resource name of the log to delete: /// - /// "projects/\[PROJECT_ID]/logs/[LOG_ID\]" - /// "organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]" - /// "folders/\[FOLDER_ID]/logs/[LOG_ID\]" + /// * `projects/\[PROJECT_ID]/logs/[LOG_ID\]` + /// * `organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]` + /// * `folders/\[FOLDER_ID]/logs/[LOG_ID\]` /// /// `\[LOG_ID\]` must be URL-encoded. For example, /// `"projects/my-project-id/logs/syslog"`, - /// `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + /// `"organizations/123/logs/cloudaudit.googleapis.com%2Factivity"`. + /// /// For more information about log names, see /// \[LogEntry][google.logging.v2.LogEntry\]. #[prost(string, tag = "1")] @@ -1563,15 +232,15 @@ pub struct WriteLogEntriesRequest { /// Optional. 
A default log resource name that is assigned to all log entries /// in `entries` that do not specify a value for `log_name`: /// - /// "projects/\[PROJECT_ID]/logs/[LOG_ID\]" - /// "organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]" - /// "folders/\[FOLDER_ID]/logs/[LOG_ID\]" + /// * `projects/\[PROJECT_ID]/logs/[LOG_ID\]` + /// * `organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]` + /// * `folders/\[FOLDER_ID]/logs/[LOG_ID\]` /// /// `\[LOG_ID\]` must be URL-encoded. For example: /// /// "projects/my-project-id/logs/syslog" - /// "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity" + /// "organizations/123/logs/cloudaudit.googleapis.com%2Factivity" /// /// The permission `logging.logEntries.create` is needed on each project, /// organization, billing account, or folder that is receiving new log @@ -1610,14 +279,14 @@ pub struct WriteLogEntriesRequest { /// the entries later in the list. See the `entries.list` method. /// /// Log entries with timestamps that are more than the - /// [logs retention period]() in + /// [logs retention period]() in /// the past or more than 24 hours in the future will not be available when /// calling `entries.list`. However, those log entries can still be [exported /// with /// LogSinks](). /// /// To improve throughput and to avoid exceeding the - /// [quota limit]() for calls to + /// [quota limit]() for calls to /// `entries.write`, you should try to include several log entries in this /// list, rather than calling this method for each individual log entry. #[prost(message, repeated, tag = "4")] @@ -1656,16 +325,17 @@ pub struct ListLogEntriesRequest { /// Required. Names of one or more parent resources from which to /// retrieve log entries: /// - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" + /// * `projects/\[PROJECT_ID\]` + /// * `organizations/\[ORGANIZATION_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` + /// * `folders/\[FOLDER_ID\]` /// - /// May alternatively be one or more views - /// projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] - /// organization/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] - /// billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] - /// folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] + /// May alternatively be one or more views: + /// + /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` /// /// Projects listed in the `project_ids` field are added to this list. #[prost(string, repeated, tag = "8")] @@ -1687,10 +357,10 @@ pub struct ListLogEntriesRequest { /// timestamps are returned in order of their `insert_id` values. #[prost(string, tag = "3")] pub order_by: ::prost::alloc::string::String, - /// Optional. The maximum number of results to return from this request. - /// Default is 50. If the value is negative or exceeds 1000, - /// the request is rejected. 
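Following the batching advice above for `entries.write`, a hypothetical helper that sends many entries in one `WriteLogEntries` call, supplying `log_name` and `resource` as defaults for entries that leave them unset. The `google::api::MonitoredResource` path, names, and endpoint are assumptions:

```rust
use google::api::MonitoredResource;
use google::logging::v2::{
    log_entry::Payload, logging_service_v2_client::LoggingServiceV2Client, LogEntry,
    WriteLogEntriesRequest,
};

async fn write_batch(messages: Vec<String>) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = LoggingServiceV2Client::connect("https://logging.googleapis.com").await?;
    let entries: Vec<LogEntry> = messages
        .into_iter()
        .map(|text| LogEntry {
            payload: Some(Payload::TextPayload(text)),
            ..Default::default()
        })
        .collect();
    client
        .write_log_entries(WriteLogEntriesRequest {
            // Defaults for entries that do not set their own log_name/resource.
            log_name: "projects/my-project-id/logs/syslog".to_string(),
            resource: Some(MonitoredResource {
                r#type: "global".to_string(),
                ..Default::default()
            }),
            entries,
            ..Default::default()
        })
        .await?;
    Ok(())
}
```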
The presence of `next_page_token` in the - /// response indicates that more results might be available. + /// Optional. The maximum number of results to return from this request. Default is 50. + /// If the value is negative or exceeds 1000, the request is rejected. The + /// presence of `next_page_token` in the response indicates that more results + /// might be available. #[prost(int32, tag = "4")] pub page_size: i32, /// Optional. If present, then retrieve the next batch of results from the @@ -1754,10 +424,10 @@ pub struct ListMonitoredResourceDescriptorsResponse { pub struct ListLogsRequest { /// Required. The resource name that owns the logs: /// - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" + /// * `projects/\[PROJECT_ID\]` + /// * `organizations/\[ORGANIZATION_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` + /// * `folders/\[FOLDER_ID\]` #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Optional. The maximum number of results to return from this request. @@ -1772,16 +442,18 @@ pub struct ListLogsRequest { #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, /// Optional. The resource name that owns the logs: - /// projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] - /// organization/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] - /// billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] - /// folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\] + /// + /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` /// /// To support legacy queries, it could also be: - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" + /// + /// * `projects/\[PROJECT_ID\]` + /// * `organizations/\[ORGANIZATION_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` + /// * `folders/\[FOLDER_ID\]` #[prost(string, repeated, tag = "8")] pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -1804,16 +476,17 @@ pub struct ListLogsResponse { pub struct TailLogEntriesRequest { /// Required. 
Name of a parent resource from which to retrieve log entries: /// - /// "projects/\[PROJECT_ID\]" - /// "organizations/\[ORGANIZATION_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID\]" - /// "folders/\[FOLDER_ID\]" + /// * `projects/\[PROJECT_ID\]` + /// * `organizations/\[ORGANIZATION_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` + /// * `folders/\[FOLDER_ID\]` /// /// May alternatively be one or more views: - /// "projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]" - /// "organization/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]" - /// "billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]" - /// "folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]" + /// + /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` #[prost(string, repeated, tag = "1")] pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Optional. A filter that chooses which log entries to return. See [Advanced @@ -1881,17 +554,17 @@ pub mod tail_log_entries_response { } } } -#[doc = r" Generated client implementations."] +/// Generated client implementations. pub mod logging_service_v2_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; - #[doc = " Service for ingesting and querying logs."] + /// Service for ingesting and querying logs. #[derive(Debug, Clone)] pub struct LoggingServiceV2Client { inner: tonic::client::Grpc, } impl LoggingServiceV2Client { - #[doc = r" Attempt to create a new client by connecting to a given endpoint."] + /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect(dst: D) -> Result where D: std::convert::TryInto, @@ -1904,8 +577,8 @@ pub mod logging_service_v2_client { impl LoggingServiceV2Client where T: tonic::client::GrpcService, - T::ResponseBody: Body + Send + 'static, T::Error: Into, + T::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { pub fn new(inner: T) -> Self { @@ -1918,6 +591,7 @@ pub mod logging_service_v2_client { ) -> LoggingServiceV2Client> where F: tonic::service::Interceptor, + T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, Response = http::Response< @@ -1929,23 +603,25 @@ pub mod logging_service_v2_client { { LoggingServiceV2Client::new(InterceptedService::new(inner, interceptor)) } - #[doc = r" Compress requests with `gzip`."] - #[doc = r""] - #[doc = r" This requires the server to support it otherwise it might respond with an"] - #[doc = r" error."] + /// Compress requests with `gzip`. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } - #[doc = r" Enable decompressing responses with `gzip`."] + /// Enable decompressing responses with `gzip`. + #[must_use] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } - #[doc = " Deletes all the log entries in a log. The log reappears if it receives new"] - #[doc = " entries. 
Log entries written shortly before the delete operation might not"] - #[doc = " be deleted. Entries received after the delete operation with a timestamp"] - #[doc = " before the operation will be deleted."] + /// Deletes all the log entries in a log for the _Default Log Bucket. The log + /// reappears if it receives new entries. Log entries written shortly before + /// the delete operation might not be deleted. Entries received after the + /// delete operation with a timestamp before the operation will be deleted. pub async fn delete_log( &mut self, request: impl tonic::IntoRequest, @@ -1962,13 +638,13 @@ pub mod logging_service_v2_client { ); self.inner.unary(request.into_request(), path, codec).await } - #[doc = " Writes log entries to Logging. This API method is the"] - #[doc = " only way to send log entries to Logging. This method"] - #[doc = " is used, directly or indirectly, by the Logging agent"] - #[doc = " (fluentd) and all logging libraries configured to use Logging."] - #[doc = " A single request may contain log entries for a maximum of 1000"] - #[doc = " different resources (projects, organizations, billing accounts or"] - #[doc = " folders)"] + /// Writes log entries to Logging. This API method is the + /// only way to send log entries to Logging. This method + /// is used, directly or indirectly, by the Logging agent + /// (fluentd) and all logging libraries configured to use Logging. + /// A single request may contain log entries for a maximum of 1000 + /// different resources (projects, organizations, billing accounts or + /// folders) pub async fn write_log_entries( &mut self, request: impl tonic::IntoRequest, @@ -1985,10 +661,10 @@ pub mod logging_service_v2_client { ); self.inner.unary(request.into_request(), path, codec).await } - #[doc = " Lists log entries. Use this method to retrieve log entries that originated"] - #[doc = " from a project/folder/organization/billing account. For ways to export log"] - #[doc = " entries, see [Exporting"] - #[doc = " Logs](https://cloud.google.com/logging/docs/export)."] + /// Lists log entries. Use this method to retrieve log entries that originated + /// from a project/folder/organization/billing account. For ways to export log + /// entries, see [Exporting + /// Logs](https://cloud.google.com/logging/docs/export). pub async fn list_log_entries( &mut self, request: impl tonic::IntoRequest, @@ -2005,7 +681,7 @@ pub mod logging_service_v2_client { ); self.inner.unary(request.into_request(), path, codec).await } - #[doc = " Lists the descriptors for monitored resource types used by Logging."] + /// Lists the descriptors for monitored resource types used by Logging. pub async fn list_monitored_resource_descriptors( &mut self, request: impl tonic::IntoRequest, @@ -2023,8 +699,8 @@ pub mod logging_service_v2_client { ); self.inner.unary(request.into_request(), path, codec).await } - #[doc = " Lists the logs in projects, organizations, folders, or billing accounts."] - #[doc = " Only logs that have entries are listed."] + /// Lists the logs in projects, organizations, folders, or billing accounts. + /// Only logs that have entries are listed. pub async fn list_logs( &mut self, request: impl tonic::IntoRequest, @@ -2041,8 +717,8 @@ pub mod logging_service_v2_client { ); self.inner.unary(request.into_request(), path, codec).await } - #[doc = " Streaming read of log entries as they are ingested. Until the stream is"] - #[doc = " terminated, it will continue reading logs."] + /// Streaming read of log entries as they are ingested. 
Until the stream is + /// terminated, it will continue reading logs. pub async fn tail_log_entries( &mut self, request: impl tonic::IntoStreamingRequest, diff --git a/opentelemetry-stackdriver/tests/generate.rs b/opentelemetry-stackdriver/tests/generate.rs index 82b3c35728..ed2d95f7e9 100644 --- a/opentelemetry-stackdriver/tests/generate.rs +++ b/opentelemetry-stackdriver/tests/generate.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use std::ffi::OsStr; use std::fs; use std::path::PathBuf; +use std::process::Command; use futures::stream::FuturesUnordered; use futures::stream::StreamExt; @@ -53,7 +54,6 @@ fn generated_code_is_fresh() { tonic_build::configure() .build_client(true) .build_server(false) - .format(which::which("rustfmt").is_ok()) .out_dir(&tmp_dir) .compile(&schemas, &["proto"]) .unwrap(); @@ -63,6 +63,15 @@ fn generated_code_is_fresh() { let (mut modules, mut renames) = (Vec::new(), Vec::new()); for entry in fs::read_dir(&tmp_dir).unwrap() { let path = entry.unwrap().path(); + + // Tonic now uses prettyplease instead of rustfmt, which causes a + // number of differences in the generated code. + Command::new("rustfmt") + .arg("--edition=2021") + .arg(&path) + .output() + .unwrap(); + let file_name_str = path.file_name().and_then(|s| s.to_str()).unwrap(); let (base, _) = file_name_str .strip_prefix("google.") @@ -173,6 +182,14 @@ fn generated_code_is_fresh() { previous = parent; } + while level > 0 { + level -= 1; + for _ in 0..(level * 4) { + root.push(' '); + } + root.push_str("}\n"); + } + fs::write(tmp_dir.path().join("mod.rs"), root).unwrap(); // Move on to actually comparing the old and new versions. diff --git a/opentelemetry-zipkin/Cargo.toml b/opentelemetry-zipkin/Cargo.toml index 79be6ae9a7..15099d35df 100644 --- a/opentelemetry-zipkin/Cargo.toml +++ b/opentelemetry-zipkin/Cargo.toml @@ -27,20 +27,21 @@ surf-client = ["surf", "opentelemetry-http/surf"] [dependencies] async-trait = "0.1" -opentelemetry = { version = "0.18.0", path = "../opentelemetry", features = ["trace"] } +opentelemetry = { version = "0.18", path = "../opentelemetry", features = ["trace"] } opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true } opentelemetry-semantic-conventions = { version = "0.10", path = "../opentelemetry-semantic-conventions" } serde_json = "1.0" serde = { version = "1.0", features = ["derive"] } typed-builder = "0.9" -lazy_static = "1.4" +once_cell = "1.12" http = "0.2" reqwest = { version = "0.11", optional = true, default-features = false } surf = { version = "2.0", optional = true, default-features = false } thiserror = { version = "1.0"} +futures-core = "0.3" [dev-dependencies] bytes = "1" futures-util = { version = "0.3", features = ["io"] } -isahc = "1.4" +hyper = "0.14" opentelemetry = { default-features = false, features = ["trace", "testing"], path = "../opentelemetry" } diff --git a/opentelemetry-zipkin/README.md b/opentelemetry-zipkin/README.md index 019af93a90..e34561647d 100644 --- a/opentelemetry-zipkin/README.md +++ b/opentelemetry-zipkin/README.md @@ -107,7 +107,7 @@ available so be sure to match them appropriately. ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.46. The current OpenTelemetry version is not guaranteed to build on +version is 1.49. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. 
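
A note on the `generated_code_is_fresh` test updated above: because tonic now emits code through prettyplease rather than rustfmt, the test re-runs `rustfmt` over each generated file before comparing it against the checked-in copy, so pure formatting differences don't register as staleness. A minimal sketch of that normalization step, assuming `rustfmt` is on `PATH` (`normalize_formatting` is an illustrative name, not the test's actual structure):

```rust
use std::path::Path;
use std::process::Command;

/// Re-format a generated file in place so prettyplease-vs-rustfmt layout
/// differences do not show up as spurious diffs in the freshness check.
fn normalize_formatting(path: &Path) {
    // `output()` spawns rustfmt, waits for it to exit, and captures its
    // stdout/stderr; the test only cares that the file was rewritten.
    let output = Command::new("rustfmt")
        .arg("--edition=2021")
        .arg(path)
        .output()
        .expect("failed to spawn rustfmt");
    assert!(
        output.status.success(),
        "rustfmt failed for {}",
        path.display()
    );
}
```
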
The current stable Rust compiler and the three most recent minor versions before diff --git a/opentelemetry-zipkin/src/exporter/mod.rs b/opentelemetry-zipkin/src/exporter/mod.rs index 0218e3a325..02e6f60503 100644 --- a/opentelemetry-zipkin/src/exporter/mod.rs +++ b/opentelemetry-zipkin/src/exporter/mod.rs @@ -3,6 +3,7 @@ mod model; mod uploader; use async_trait::async_trait; +use futures_core::future::BoxFuture; use http::Uri; use model::endpoint::Endpoint; use opentelemetry::sdk::resource::ResourceDetector; @@ -18,6 +19,7 @@ use opentelemetry::{ }; use opentelemetry_http::HttpClient; use opentelemetry_semantic_conventions as semcov; +use std::borrow::Cow; #[cfg(all( not(feature = "reqwest-client"), not(feature = "reqwest-blocking-client"), @@ -36,7 +38,7 @@ pub struct Exporter { } impl Exporter { - fn new(local_endpoint: Endpoint, client: Box, collector_endpoint: Uri) -> Self { + fn new(local_endpoint: Endpoint, client: Arc, collector_endpoint: Uri) -> Self { Exporter { local_endpoint, uploader: uploader::Uploader::new(client, collector_endpoint), @@ -56,7 +58,7 @@ pub struct ZipkinPipelineBuilder { service_addr: Option, collector_endpoint: String, trace_config: Option, - client: Option>, + client: Option>, } impl Default for ZipkinPipelineBuilder { @@ -64,7 +66,7 @@ impl Default for ZipkinPipelineBuilder { let timeout = env::get_timeout(); ZipkinPipelineBuilder { #[cfg(feature = "reqwest-blocking-client")] - client: Some(Box::new( + client: Some(Arc::new( reqwest::blocking::Client::builder() .timeout(timeout) .build() @@ -75,7 +77,7 @@ impl Default for ZipkinPipelineBuilder { not(feature = "surf-client"), feature = "reqwest-client" ))] - client: Some(Box::new( + client: Some(Arc::new( reqwest::Client::builder() .timeout(timeout) .build() @@ -86,7 +88,7 @@ impl Default for ZipkinPipelineBuilder { not(feature = "reqwest-blocking-client"), feature = "surf-client" ))] - client: Some(Box::new( + client: Some(Arc::new( surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout))) .unwrap_or_else(|_| surf::Client::new()), )), @@ -118,18 +120,17 @@ impl ZipkinPipelineBuilder { let service_name = self.service_name.take(); if let Some(service_name) = service_name { let config = if let Some(mut cfg) = self.trace_config.take() { - cfg.resource = cfg.resource.map(|r| { - let without_service_name = r + cfg.resource = Cow::Owned(Resource::new( + cfg.resource .iter() .filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME) .map(|(k, v)| KeyValue::new(k.clone(), v.clone())) - .collect::>(); - Arc::new(Resource::new(without_service_name)) - }); + .collect::>(), + )); cfg } else { Config { - resource: Some(Arc::new(Resource::empty())), + resource: Cow::Owned(Resource::empty()), ..Default::default() } }; @@ -143,7 +144,7 @@ impl ZipkinPipelineBuilder { ( Config { // use a empty resource to prevent TracerProvider to assign a service name. 
- resource: Some(Arc::new(Resource::empty())), + resource: Cow::Owned(Resource::empty()), ..Default::default() }, Endpoint::new(service_name, self.service_addr), @@ -212,7 +213,7 @@ impl ZipkinPipelineBuilder { /// Assign client implementation pub fn with_http_client(mut self, client: T) -> Self { - self.client = Some(Box::new(client)); + self.client = Some(Arc::new(client)); self } @@ -235,16 +236,28 @@ impl ZipkinPipelineBuilder { } } +async fn zipkin_export( + batch: Vec, + uploader: uploader::Uploader, + local_endpoint: Endpoint, +) -> trace::ExportResult { + let zipkin_spans = batch + .into_iter() + .map(|span| model::into_zipkin_span(local_endpoint.clone(), span)) + .collect(); + + uploader.upload(zipkin_spans).await +} + #[async_trait] impl trace::SpanExporter for Exporter { /// Export spans to Zipkin collector. - async fn export(&mut self, batch: Vec) -> trace::ExportResult { - let zipkin_spans = batch - .into_iter() - .map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span)) - .collect(); - - self.uploader.upload(zipkin_spans).await + fn export(&mut self, batch: Vec) -> BoxFuture<'static, trace::ExportResult> { + Box::pin(zipkin_export( + batch, + self.uploader.clone(), + self.local_endpoint.clone(), + )) } } diff --git a/opentelemetry-zipkin/src/exporter/model/span.rs b/opentelemetry-zipkin/src/exporter/model/span.rs index 2360a6f531..8c1e782e51 100644 --- a/opentelemetry-zipkin/src/exporter/model/span.rs +++ b/opentelemetry-zipkin/src/exporter/model/span.rs @@ -61,7 +61,9 @@ mod tests { use crate::exporter::model::{into_zipkin_span, OTEL_ERROR_DESCRIPTION, OTEL_STATUS_CODE}; use opentelemetry::sdk::export::trace::SpanData; use opentelemetry::sdk::trace::{EvictedHashMap, EvictedQueue}; + use opentelemetry::sdk::Resource; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId}; + use std::borrow::Cow; use std::collections::HashMap; use std::net::Ipv4Addr; use std::time::SystemTime; @@ -164,7 +166,7 @@ mod tests { events: EvictedQueue::new(20), links: EvictedQueue::new(20), status, - resource: None, + resource: Cow::Owned(Resource::default()), instrumentation_lib: Default::default(), }; let local_endpoint = Endpoint::new("test".into(), None); diff --git a/opentelemetry-zipkin/src/exporter/uploader.rs b/opentelemetry-zipkin/src/exporter/uploader.rs index cd790e47b0..9ab843034b 100644 --- a/opentelemetry-zipkin/src/exporter/uploader.rs +++ b/opentelemetry-zipkin/src/exporter/uploader.rs @@ -5,15 +5,16 @@ use http::{header::CONTENT_TYPE, Method, Request, Uri}; use opentelemetry::sdk::export::trace::ExportResult; use opentelemetry_http::{HttpClient, ResponseExt}; use std::fmt::Debug; +use std::sync::Arc; -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum Uploader { Http(JsonV2Client), } impl Uploader { /// Create a new http uploader - pub(crate) fn new(client: Box, collector_endpoint: Uri) -> Self { + pub(crate) fn new(client: Arc, collector_endpoint: Uri) -> Self { Uploader::Http(JsonV2Client { client, collector_endpoint, @@ -28,9 +29,9 @@ impl Uploader { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct JsonV2Client { - client: Box, + client: Arc, collector_endpoint: Uri, } diff --git a/opentelemetry-zipkin/src/lib.rs b/opentelemetry-zipkin/src/lib.rs index 6dcb2ce443..9c08842ce9 100644 --- a/opentelemetry-zipkin/src/lib.rs +++ b/opentelemetry-zipkin/src/lib.rs @@ -98,31 +98,39 @@ //! use http::{Request, Response}; //! use std::convert::TryInto as _; //! use std::error::Error; +//! 
use hyper::{client::HttpConnector, Body}; //! //! // `reqwest` and `surf` are supported through features, if you prefer an //! // alternate http client you can add support by implementing `HttpClient` as //! // shown here. //! #[derive(Debug)] -//! struct IsahcClient(isahc::HttpClient); +//! struct HyperClient(hyper::Client); //! //! #[async_trait] -//! impl HttpClient for IsahcClient { -//! async fn send(&self, request: Request>) -> Result, HttpError> { -//! let mut response = self.0.send_async(request).await?; -//! let status = response.status(); -//! let mut bytes = Vec::with_capacity(response.body().len().unwrap_or(0).try_into()?); -//! isahc::AsyncReadResponseExt::copy_to(&mut response, &mut bytes).await?; -//! -//! Ok(Response::builder() -//! .status(response.status()) -//! .body(bytes.into())?) +//! impl HttpClient for HyperClient { +//! async fn send(&self, req: Request>) -> Result, HttpError> { +//! let resp = self +//! .0 +//! .request(req.map(|v| Body::from(v))) +//! .await?; +//! +//! let response = Response::builder() +//! .status(resp.status()) +//! .body({ +//! hyper::body::to_bytes(resp.into_body()) +//! .await +//! .expect("cannot decode response") +//! }) +//! .expect("cannot build response"); +//! +//! Ok(response) //! } //! } //! //! fn main() -> Result<(), Box> { //! global::set_text_map_propagator(opentelemetry_zipkin::Propagator::new()); //! let tracer = opentelemetry_zipkin::new_pipeline() -//! .with_http_client(IsahcClient(isahc::HttpClient::new()?)) +//! .with_http_client(HyperClient(hyper::Client::new())) //! .with_service_name("my_app") //! .with_service_address("127.0.0.1:8080".parse()?) //! .with_collector_endpoint("http://localhost:9411/api/v2/spans") diff --git a/opentelemetry-zipkin/src/propagator/mod.rs b/opentelemetry-zipkin/src/propagator/mod.rs index 3c52096f99..9f4d1ecb32 100644 --- a/opentelemetry-zipkin/src/propagator/mod.rs +++ b/opentelemetry-zipkin/src/propagator/mod.rs @@ -13,6 +13,7 @@ //! //! If `inject_encoding` is set to `B3Encoding::SingleHeader` then `b3` header is used to inject //! and extract. Otherwise, separate headers are used to inject and extract. +use once_cell::sync::Lazy; use opentelemetry::{ propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, @@ -33,11 +34,24 @@ const B3_PARENT_SPAN_ID_HEADER: &str = "x-b3-parentspanid"; const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); const TRACE_FLAG_DEBUG: TraceFlags = TraceFlags::new(0x04); -lazy_static::lazy_static! 
{ - static ref B3_SINGLE_FIELDS: [String; 1] = [B3_SINGLE_HEADER.to_string()]; - static ref B3_MULTI_FIELDS: [String; 4] = [B3_TRACE_ID_HEADER.to_string(), B3_SPAN_ID_HEADER.to_string(), B3_SAMPLED_HEADER.to_string(), B3_DEBUG_FLAG_HEADER.to_string()]; - static ref B3_SINGLE_AND_MULTI_FIELDS: [String; 5] = [B3_SINGLE_HEADER.to_string(), B3_TRACE_ID_HEADER.to_string(), B3_SPAN_ID_HEADER.to_string(), B3_SAMPLED_HEADER.to_string(), B3_DEBUG_FLAG_HEADER.to_string()]; -} +static B3_SINGLE_FIELDS: Lazy<[String; 1]> = Lazy::new(|| [B3_SINGLE_HEADER.to_owned()]); +static B3_MULTI_FIELDS: Lazy<[String; 4]> = Lazy::new(|| { + [ + B3_TRACE_ID_HEADER.to_owned(), + B3_SPAN_ID_HEADER.to_owned(), + B3_SAMPLED_HEADER.to_owned(), + B3_DEBUG_FLAG_HEADER.to_owned(), + ] +}); +static B3_SINGLE_AND_MULTI_FIELDS: Lazy<[String; 5]> = Lazy::new(|| { + [ + B3_SINGLE_HEADER.to_owned(), + B3_TRACE_ID_HEADER.to_owned(), + B3_SPAN_ID_HEADER.to_owned(), + B3_SAMPLED_HEADER.to_owned(), + B3_DEBUG_FLAG_HEADER.to_owned(), + ] +}); /// B3Encoding is a bitmask to represent B3 encoding type #[derive(Clone, Debug)] diff --git a/opentelemetry-zpages/Cargo.toml b/opentelemetry-zpages/Cargo.toml index e5010c6a40..79e8be83fb 100644 --- a/opentelemetry-zpages/Cargo.toml +++ b/opentelemetry-zpages/Cargo.toml @@ -24,7 +24,6 @@ opentelemetry-proto = { version = "0.1", path = "../opentelemetry-proto", featur async-channel = "1.6" futures-channel = "0.3" futures-util = { version = "0.3", default-features = false, features = ["std"] } -lazy_static = "1.4.0" serde = "1.0" serde_json = "1.0" diff --git a/opentelemetry-zpages/src/lib.rs b/opentelemetry-zpages/src/lib.rs index 29390a4b43..03022239f3 100644 --- a/opentelemetry-zpages/src/lib.rs +++ b/opentelemetry-zpages/src/lib.rs @@ -66,6 +66,3 @@ mod trace; pub use trace::{ span_processor::ZPagesSpanProcessor, tracez, TracezError, TracezQuerier, TracezResponse, }; - -#[macro_use] -extern crate lazy_static; diff --git a/opentelemetry-zpages/src/trace/aggregator.rs b/opentelemetry-zpages/src/trace/aggregator.rs index efba7ba4bb..acebf31846 100644 --- a/opentelemetry-zpages/src/trace/aggregator.rs +++ b/opentelemetry-zpages/src/trace/aggregator.rs @@ -16,19 +16,17 @@ use crate::SpanQueue; use opentelemetry::sdk::export::trace::SpanData; use opentelemetry_proto::grpcio::tracez::TracezCounts; -lazy_static! 
{ - static ref LATENCY_BUCKET: [Duration; 9] = [ - Duration::from_micros(0), - Duration::from_micros(10), - Duration::from_micros(100), - Duration::from_millis(1), - Duration::from_millis(10), - Duration::from_millis(100), - Duration::from_secs(1), - Duration::from_secs(10), - Duration::from_secs(100), - ]; -} +const LATENCY_BUCKET: [Duration; 9] = [ + Duration::from_micros(0), + Duration::from_micros(10), + Duration::from_micros(100), + Duration::from_millis(1), + Duration::from_millis(10), + Duration::from_millis(100), + Duration::from_secs(1), + Duration::from_secs(10), + Duration::from_secs(100), +]; const LATENCY_BUCKET_COUNT: usize = 9; /// Aggregate span information from `ZPagesSpanProcessor` and feed that information to server when @@ -166,8 +164,8 @@ fn latency_bucket(start_time: SystemTime, end_time: SystemTime) -> usize { - start_time .duration_since(UNIX_EPOCH) .unwrap_or_else(|_| Duration::from_millis(0)); - for idx in 1..LATENCY_BUCKET.len() { - if LATENCY_BUCKET[idx] > latency { + for (idx, lower) in LATENCY_BUCKET.iter().copied().enumerate().skip(1) { + if lower > latency { return (idx - 1) as usize; } } diff --git a/opentelemetry/README.md b/opentelemetry/README.md index dfa8be5c21..c48c897edb 100644 --- a/opentelemetry/README.md +++ b/opentelemetry/README.md @@ -130,7 +130,7 @@ above, please let us know! We'd love to add your project to the list! ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.46. The current OpenTelemetry version is not guaranteed to build +version is 1.49. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. The current stable Rust compiler and the three most recent minor versions diff --git a/scripts/test.sh b/scripts/test.sh index a7a3b9d399..3e3b6559ea 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -2,7 +2,7 @@ set -eu -cargo test --all "$@" +cargo test --all "$@" -- --test-threads=1 # See https://github.com/rust-lang/cargo/issues/5364 cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features From ba7450f1fbf100671edb59379d5942e58cf9a775 Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Sun, 3 Jul 2022 19:22:17 -0700 Subject: [PATCH 5/6] Update changelogs for new PRs --- opentelemetry-contrib/CHANGELOG.md | 4 ++++ opentelemetry-datadog/CHANGELOG.md | 2 ++ opentelemetry-http/CHANGELOG.md | 1 + opentelemetry-jaeger/CHANGELOG.md | 5 +++++ opentelemetry-otlp/CHANGELOG.md | 2 ++ opentelemetry-stackdriver/CHANGELOG.md | 1 + opentelemetry/CHANGELOG.md | 11 ++++++++++- 7 files changed, 25 insertions(+), 1 deletion(-) diff --git a/opentelemetry-contrib/CHANGELOG.md b/opentelemetry-contrib/CHANGELOG.md index 9f760071fe..d9bdf9007c 100644 --- a/opentelemetry-contrib/CHANGELOG.md +++ b/opentelemetry-contrib/CHANGELOG.md @@ -2,6 +2,10 @@ ## v0.10.0 +### Added + +- Add jaeger JSON file exporter #814 + ### Changed - Rename binary propagator's functions #776 diff --git a/opentelemetry-datadog/CHANGELOG.md b/opentelemetry-datadog/CHANGELOG.md index 2048cb155f..d83e8d2849 100644 --- a/opentelemetry-datadog/CHANGELOG.md +++ b/opentelemetry-datadog/CHANGELOG.md @@ -8,6 +8,8 @@ - Update to opentelemetry v0.18.0 - Update to opentelemetry-http v0.7.0 - Update to opentelemetry-semantic-conventions v0.10.0 +- Parse config endpoint to remove tailing slash #787 +- Add sampling priority tag in spans #792 ## v0.5.0 diff --git a/opentelemetry-http/CHANGELOG.md b/opentelemetry-http/CHANGELOG.md index 
a44baa1b17..84b0d05be9 100644 --- a/opentelemetry-http/CHANGELOG.md +++ b/opentelemetry-http/CHANGELOG.md @@ -5,6 +5,7 @@ ### Changed - Update to opentelemetry v0.18.0 +- Export `byte` and `http` types #798 ## v0.6.0 diff --git a/opentelemetry-jaeger/CHANGELOG.md b/opentelemetry-jaeger/CHANGELOG.md index 93c4fac65d..cca062918e 100644 --- a/opentelemetry-jaeger/CHANGELOG.md +++ b/opentelemetry-jaeger/CHANGELOG.md @@ -11,6 +11,11 @@ - Update to opentelemetry-http v0.7.0 - Update to opentelemetry-semantic-conventions v0.10.0 +### Fixed + +- Fix clearing span context in Propagator #810 +- Fix reqwest client runs inside a non-tokio runtime #829 + ## v0.16.0 ### Changed diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index 3deb0044b7..e3e133019a 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -7,6 +7,8 @@ - reduce `tokio` feature requirements #750 - Update to opentelemetry v0.18.0 - Update to opentelemetry-http v0.7.0 +- Update `tonic` to 0.7 #783 +- Automatically add traces / metrics paths #806 ## v0.10.0 diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md index 5a3024a592..65c50ddd38 100644 --- a/opentelemetry-stackdriver/CHANGELOG.md +++ b/opentelemetry-stackdriver/CHANGELOG.md @@ -10,6 +10,7 @@ - Upgrade to opentelemetry v0.18.0 - Upgrade to opentelemetry-semantic-conventions v0.10 +- update tonic and prost #825 ## v0.14.0 diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 45ea12179a..393bfff8aa 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -12,6 +12,8 @@ and SDK are still unstable. - Add `schema_url` to `Tracer` #743 - Add `schema_url` to `Resource` #775 - Add `Span::set_attributes` #638 +- Support concurrent exports #781 +- Add jaeger remote sampler #797 ### Changed @@ -24,12 +26,19 @@ and SDK are still unstable. 
- rename `Span::record_exception` to `Span::record_error` #756 - Replace `StatusCode` and `message` with `Status` #760 - Move `TracerProvider::force_flush` to SDK #658 +- Switch to static resource references #790 +- Allow O(1) get operations for `SpanBuilder::attributes` [breaking] #799 +- Allow ref counted keys and values #821 + +### Fixed + +- Update dashmap to avoid soundness hole #818 ### Removed - Remove `serialize` feature #738 - Remove `StatusCode::as_str` #741 -- Remove `Tracer::with_span` #746 +- Remove `Tracer::with_span` #746 ## [v0.17.0](https://github.com/open-telemetry/opentelemetry-rust/compare/v0.16.0...v0.17.0) From 09cab1257f4e3c1fa1873babb9730a02ce63640c Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Sun, 11 Sep 2022 13:35:17 -0700 Subject: [PATCH 6/6] Update changelogs for recent PRs --- opentelemetry-aws/CHANGELOG.md | 4 ++++ opentelemetry-http/CHANGELOG.md | 1 + opentelemetry-jaeger/CHANGELOG.md | 7 +++++++ opentelemetry-stackdriver/CHANGELOG.md | 5 +++++ opentelemetry/CHANGELOG.md | 9 +++++++++ 5 files changed, 26 insertions(+) diff --git a/opentelemetry-aws/CHANGELOG.md b/opentelemetry-aws/CHANGELOG.md index a2b361c10f..bb1780a58c 100644 --- a/opentelemetry-aws/CHANGELOG.md +++ b/opentelemetry-aws/CHANGELOG.md @@ -7,6 +7,10 @@ - reduce `tokio` feature requirements #750 - Update to opentelemetry v0.18.0 +### Fixed + +- Fix XrayPropagator when no header is present #867 + ## v0.5.0 ### Changed diff --git a/opentelemetry-http/CHANGELOG.md b/opentelemetry-http/CHANGELOG.md index 84b0d05be9..de933577f9 100644 --- a/opentelemetry-http/CHANGELOG.md +++ b/opentelemetry-http/CHANGELOG.md @@ -6,6 +6,7 @@ - Update to opentelemetry v0.18.0 - Export `byte` and `http` types #798 +- Implementation of collector http client with pure hyper #853 ## v0.6.0 diff --git a/opentelemetry-jaeger/CHANGELOG.md b/opentelemetry-jaeger/CHANGELOG.md index b1e627066b..97b607d53c 100644 --- a/opentelemetry-jaeger/CHANGELOG.md +++ b/opentelemetry-jaeger/CHANGELOG.md @@ -2,6 +2,13 @@ ## v0.17.0 +### Added + +- Support rustls in jaeger reqwest collector #834 +- Customisation support in Jaeger Propagator. #852 +- Add IPv6 support for Jaeger agent addresses #856 +- Add `with_batch_processor_config` for jaeger pipline #869 + ### Changed - Consolidate the config errors #762 diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md index 65c50ddd38..68b42237c8 100644 --- a/opentelemetry-stackdriver/CHANGELOG.md +++ b/opentelemetry-stackdriver/CHANGELOG.md @@ -5,6 +5,7 @@ ### Added - Added mappings from OTel attributes to Google Cloud Traces #744 +- Added `MonitoredResource::CloudRunRevision` #847 ### Changed @@ -12,6 +13,10 @@ - Upgrade to opentelemetry-semantic-conventions v0.10 - update tonic and prost #825 +### Fixed + +- Fix `LogEntry.trace` not populated correctly #850 + ## v0.14.0 ### Changed diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 393bfff8aa..e57cff5630 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -14,6 +14,8 @@ and SDK are still unstable. - Add `Span::set_attributes` #638 - Support concurrent exports #781 - Add jaeger remote sampler #797 +- Allow Custom Samplers #833 +- Add `SpanExporter::force_flush` and default implementation #845 ### Changed @@ -29,10 +31,17 @@ and SDK are still unstable. 
- Switch to static resource references #790 - Allow O(1) get operations for `SpanBuilder::attributes` [breaking] #799 - Allow ref counted keys and values #821 +- Bump MSRV from 1.49 to 1.55 #811 +- bump MSRV to 1.56 #866 +- Update metrics API and SDK for latest spec #819 +- Switch to `pin-project-lite` #830 ### Fixed - Update dashmap to avoid soundness hole #818 +- Perform sampling as explained in the specification #839 +- Remove internal message queue between exporter and exporting tasks #848 +- Fix span processor exporting unsampled spans #871 ### Removed
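
The zipkin exporter diff earlier in this series illustrates the shape behind "Support concurrent exports" (#781): `SpanExporter::export` stops being an `async fn` borrowing `&mut self` and instead returns an owned `BoxFuture`, so the batch processor can hold several exports in flight at once. A minimal sketch of that pattern under simplified assumptions — `Uploader`, `ExportResult`, and the `Vec<String>` batch are stand-ins here, not the SDK's real `SpanData`/`ExportResult` types:

```rust
use futures_core::future::BoxFuture;

type ExportResult = Result<(), Box<dyn std::error::Error + Send + Sync>>;

// Stand-in for the exporter's transport handle; in the real diff this wraps
// an `Arc<dyn HttpClient>`, which makes `Clone` cheap.
#[derive(Clone)]
struct Uploader;

impl Uploader {
    async fn upload(self, _batch: Vec<String>) -> ExportResult {
        // Encode and send the spans over the wire here.
        Ok(())
    }
}

struct Exporter {
    uploader: Uploader,
}

impl Exporter {
    // Returning a `'static` boxed future instead of borrowing `&mut self`
    // for the duration of the export lets callers drive several export
    // futures concurrently.
    fn export(&mut self, batch: Vec<String>) -> BoxFuture<'static, ExportResult> {
        // Clone everything the future needs so it owns its state outright.
        let uploader = self.uploader.clone();
        Box::pin(uploader.upload(batch))
    }
}
```

This is also why the same patch changes `Uploader` and `JsonV2Client` from holding `Box<dyn HttpClient>` to `Arc<dyn HttpClient>` and derives `Clone` on them: the export future must own a handle to the client, and an `Arc` clone is a reference-count bump rather than a copy of the client itself.
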